60 |
61 |
80 |
81 |
82 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 | | protein1 |
96 | protein2 |
neighborhood |
cooccurrence |
coexpression |
100 |
101 |
102 |
103 |
104 | | ENSP273047 |
105 | ENSP261890 |
106 | 0 |
107 | 53 |
108 | 83 |
109 |
110 |
111 | | ENSP273047 |
112 | ENSP261890 |
113 | 0 |
114 | 53 |
115 | 83 |
116 |
117 |
118 | | ENSP300413 |
119 | ENSP274242 |
120 | 426 |
121 | 0 |
122 | 164 |
123 |
124 |
125 | | ENSP300413 |
126 | ENSP274242 |
127 | 426 |
128 | 0 |
129 | 164 |
130 |
131 |
132 | | ENSP300413 |
133 | ENSP274242 |
134 | 426 |
135 | 0 |
136 | 164 |
137 |
138 |
139 | | ENSP300413 |
140 | ENSP274242 |
141 | 426 |
142 | 0 |
143 | 164 |
144 |
145 |
146 | | ENSP309334 |
147 | ENSP346022 |
148 | 0 |
149 | 227 |
150 | 975 |
151 |
152 |
153 | | ENSP309334 |
154 | ENSP346022 |
155 | 0 |
156 | 227 |
157 | 975 |
158 |
159 |
160 |
161 |
162 |
163 |
164 |
165 |
166 |
167 |
168 |
169 |
170 |
171 |
172 |
182 |
183 |
184 |
185 |
186 |
187 |
188 |
189 |
190 |
191 |
192 |
193 |
194 |
195 |
196 |
198 |
199 |
200 |
201 |
202 |
203 |
204 |
205 |
206 |
207 |
208 |
209 |
210 |
211 |
--------------------------------------------------------------------------------
/orpheus/interface/src/cmd_parser.py:
--------------------------------------------------------------------------------
1 | import shlex
2 | import os
3 | import yaml
4 |
5 | from django.contrib import messages
6 | from main.models import CVDs, PrivateFiles, PrivateTables
7 | from django.conf import settings
8 |
9 | from orpheus.core.executor import Executor
10 | from orpheus.core.orpheus_exceptions import BadStateError, NotImplementedError, BadParametersError
11 | from orpheus.core.relation import RelationManager
12 | from db import DatabaseManager
13 | import orpheus.core.orpheus_const as const
14 | from orpheus.core.vgraph import VersionGraph
15 |
16 |
class CommandNotExistError(Exception):
    """Signals that the submitted command is not a recognized orpheus command."""

    def __init__(self, cmd):
        self.name = cmd

    def __str__(self):
        return "Command '{0}' does not exist".format(self.name)
22 |
class CommandInvalidError(Exception):
    """Signals that a recognized command was given malformed arguments."""

    def __init__(self, cmd):
        self.name = cmd

    def __str__(self):
        return "Command '{0}' is not valid".format(self.name)
28 |
class Parser(object):
    """Parse web-submitted ``orpheus ...`` command strings and dispatch them.

    On construction the OrpheusDB ``config.yaml`` is loaded and augmented
    with the database credentials from the Django settings.  Each parsed
    command is forwarded to an :class:`Executor` with a fresh
    :class:`DatabaseManager` connection; user-facing errors are reported
    through ``django.contrib.messages``.
    """

    def __init__(self, request):
        """Load config.yaml and merge in the Django DB settings.

        request -- the Django request object, used for user-facing messages.
        Raises BadStateError when the config file is missing or malformed.
        """
        self.request = request
        self.config_file = 'config.yaml'
        if 'ORPHEUS_HOME' not in os.environ:
            os.environ['ORPHEUS_HOME'] = os.getcwd()
        # NOTE(fix): the original dropped the '/' separator when ORPHEUS_HOME
        # was already set in the environment; os.path.join handles both cases.
        self.config_path = os.path.join(os.environ['ORPHEUS_HOME'], self.config_file)
        try:
            with open(self.config_path, 'r') as f:
                # safe_load: never execute arbitrary YAML tags from the config file
                self.config = yaml.safe_load(f)
            assert(self.config['orpheus_home'] != None)

            if not self.config['orpheus_home'].endswith("/"):
                self.config['orpheus_home'] += "/"
            # if user overwrote ORPHEUS_HOME, rewrite the environment variable
            if 'orpheus_home' in self.config:
                os.environ['ORPHEUS_HOME'] = self.config['orpheus_home']
        except (IOError, KeyError):
            raise BadStateError("config.yaml file not found or data not clean, abort")
        except AssertionError:
            raise BadStateError("orpheus_home not specified in config.yaml")
        except Exception:  # unknown error
            raise BadStateError("Unknown error during loading the config file, abort")

        # extract database related info from the Django settings
        self.config['user'] = settings.DATABASES['default']['USER']
        self.config['database'] = settings.DATABASES['default']['NAME']

    def get_attributes(self, dataset):
        """Return the comma-joined attribute names of *dataset*'s datatable."""
        conn = DatabaseManager(self.config, self.request)
        rel = RelationManager(conn)
        datatable_attributes, _ = rel.get_datatable_attribute(dataset + const.DATATABLE_SUFFIX)
        return ",".join(datatable_attributes)

    def parse(self, cmd_string, explain_btn):
        """Parse one ``orpheus ...`` command string and execute it.

        explain_btn -- True when the web 'Explain' button was pressed; only
        the ``run`` action supports it.
        Returns result rows for run/explain/show, None otherwise.
        Raises CommandNotExistError for an unrecognized command.
        """
        # shlex preserves quoted substrings (e.g. the commit message)
        cmd = shlex.split(cmd_string)
        executor = Executor(self.config, self.request)
        # NOTE(fix): length is checked before indexing so an empty command
        # cannot raise IndexError, and the raw string (not the token list)
        # is reported, matching the unknown-action branch below.
        if len(cmd) < 2 or cmd[0] != "orpheus":
            raise CommandNotExistError(cmd_string)
        action = cmd[1]
        if action == "run" and explain_btn:
            action = "explain"
        elif explain_btn:
            messages.error(self.request, "This command could not be executed by the \'Explain\' button")
            return None
        try:
            if action == "init":
                input_file, dataset, table_name, schema = self.__parse_init(cmd)
                conn = DatabaseManager(self.config, self.request)
                executor.exec_init(input_file, dataset, table_name, schema, conn)

            elif action == "checkout":
                dataset, vlist, to_table, to_file, delimiters, header, ignore = self.__parse_checkout(cmd)
                conn = DatabaseManager(self.config, self.request)
                executor.exec_checkout(dataset, vlist, to_table, to_file, delimiters, header, ignore, conn)

            elif action == "commit":
                message, table_name, file_name, delimiters, header = self.__parse_commit(cmd)
                conn = DatabaseManager(self.config, self.request)
                # the returned parent/version info is not used by the web UI
                executor.exec_commit(message, table_name, file_name, delimiters, header, conn)

            elif action == "run":
                sql = self.__parse_run(cmd)
                conn = DatabaseManager(self.config, self.request)
                attr_names, transactions = executor.exec_run(sql, conn)
                return [(attr_names, transactions)]
            elif action == "explain":
                sql = self.__parse_run(cmd)
                conn = DatabaseManager(self.config, self.request)
                return executor.exec_explain(sql, conn)
            elif action == "drop":
                dataset = self.__parse_drop(cmd)
                conn = DatabaseManager(self.config, self.request)
                executor.exec_drop(dataset, conn)

            elif action == "show":
                dataset = self.__parse_show(cmd)
                conn = DatabaseManager(self.config, self.request)
                return executor.exec_show(dataset, conn)
            elif action == "restore":
                conn = DatabaseManager(self.config, self.request)
                executor.exec_restore(conn)
            else:
                raise CommandNotExistError(cmd_string)
        except Exception as e:
            # UI boundary: surface the error to the user instead of crashing
            messages.error(self.request, str(e))
            return None
    # TODO: This simple parser does not detect invalid optional tags.
    # E.g. -z schema => no schema file detected. Rather, it should print out error message, no -z option

    # Init command
    # Required args: input, dataset, optional args: -t table, -s schema
    def __parse_init(self, cmd):
        """Return (input_file, dataset, table_name, schema) for an init command."""
        try:
            input_file, dataset = cmd[2], cmd[3]
            table_name = cmd[cmd.index('-t') + 1] if '-t' in cmd else None
            schema = cmd[cmd.index('-s') + 1] if '-s' in cmd else None
        except Exception:
            raise CommandInvalidError(' '.join(cmd))
        return input_file, dataset, table_name, schema

    # checkout command
    # required argument: dataset, vlist, optional args: to_table, to_file, delimiters, header, ignore
    def __parse_checkout(self, cmd):
        """Return (dataset, vlist, to_table, to_file, delimiters, header, ignore)."""
        try:
            dataset, vlist = cmd[2], []
            to_table, to_file, delimiters = None, None, ','
            for i, token in enumerate(cmd):
                if token == '-v':
                    vlist.append(str(cmd[i + 1]))
            if '-t' in cmd:
                to_table = cmd[cmd.index('-t') + 1]
            if '-f' in cmd:
                to_file = cmd[cmd.index('-f') + 1]
            if '-d' in cmd:
                delimiters = cmd[cmd.index('-d') + 1]
            header = '-h' in cmd
            ignore = '--ignore' in cmd
        except Exception:
            raise CommandInvalidError(' '.join(cmd))
        return dataset, vlist, to_table, to_file, delimiters, header, ignore

    # Commit command
    # Required argument: -m message, optional argument: -t table_name, -f file_name, -d delimiters, -h header
    def __parse_commit(self, cmd):
        """Return (message, table_name, file_name, delimiters, header)."""
        try:
            message = cmd[cmd.index('-m') + 1]
            table_name = cmd[cmd.index('-t') + 1] if '-t' in cmd else None
            file_name = cmd[cmd.index('-f') + 1] if '-f' in cmd else None
            delimiters = cmd[cmd.index('-d') + 1] if '-d' in cmd else ','
            header = '-h' in cmd
        except Exception:
            raise CommandInvalidError(' '.join(cmd))
        return message, table_name, file_name, delimiters, header

    def __init_vGraph(self, dataset, start_vid=1):
        """Initialize the version-graph JSON for *dataset*; clean up on failure.

        NOTE(fix): the original definition was missing ``self`` and referenced
        an undefined ``dataset``; it is never called from this module.
        """
        graph = VersionGraph(self.config, self.request)
        try:
            graph.init_vGraph_json(dataset, start_vid)  # init vid = 1
        except Exception:
            graph.delete_vGraph_json(dataset)
            raise
    # TODO: What about schema? Automation or specified by user?


    # Drop command
    # Required argument: dataset
    def __parse_drop(self, cmd):
        """Return the dataset name for a drop command."""
        try:
            dataset = cmd[2]
        except Exception:
            raise CommandInvalidError(' '.join(cmd))
        return dataset

    def __parse_run(self, cmd):
        """Return the SQL string for a run/explain command."""
        try:
            sql = cmd[2]
        except Exception:
            raise CommandInvalidError(' '.join(cmd))
        return sql

    # The func is the same as __parse_drop
    def __parse_show(self, cmd):
        """Return the dataset name for a show command."""
        try:
            dataset = cmd[2]
        except Exception:
            raise CommandInvalidError(' '.join(cmd))
        return dataset
226 |
--------------------------------------------------------------------------------
/orpheus/clt/db.py:
--------------------------------------------------------------------------------
1 | import yaml
2 | import logging
3 | import click
4 | import psycopg2
5 | import sys
6 | import json
7 |
8 | from orpheus.core.orpheus_sqlparse import SQLParser
9 | from orpheus.core.orpheus_exceptions import BadStateError, NotImplementedError, BadParametersError
10 | import orpheus.core.orpheus_const as const
11 | from orpheus.core.db import UserNotSetError, ConnectionError, OperationError, DatasetExistsError, SQLSyntaxError
12 |
class DatabaseManager():
    """psycopg2-backed manager for OrpheusDB CVD tables and user bookkeeping.

    NOTE(review): this module uses Python 2 ``print`` statements; it will not
    run under Python 3 as written.  SQL statements are assembled with string
    interpolation throughout — safe only if dataset/user names are trusted.
    """

    def __init__(self, config):
        """Read connection settings from the yaml config dict and connect.

        Raises BadStateError when a required field is missing from *config*.
        """
        # yaml config passed from ctx
        try:
            self.verbose = False
            self.connect = None
            self.cursor = None
            self.config = config
            logging.basicConfig(filename=config['log_path'], format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S ')
            self.user_log = open(config['user_log'], 'a')
            self.home = config['orpheus_home']
            self.currentDB = config['database']
            self.user = config['user']
            self.password = config['passphrase']
            # libpq keyword/value DSN; NOTE(review): password is embedded in clear text
            self.connect_str = "host=" + self.config['host'] + " port=" + str(self.config['port']) + " dbname=" + self.currentDB + " user=" + self.user + " password=" + self.password
            self.connect_db()
        except KeyError as e:
            raise BadStateError("context missing field %s, abort" % e.args[0])


    def connect_db(self):
        """Open the psycopg2 connection and cursor; returns self.

        Raises ConnectionError when the server is unreachable or credentials fail.
        """
        print "Connecting to the database [%s] ..." % self.currentDB
        try:
            if self.verbose:
                click.echo('Trying to connect to %s' % (self.currentDB))
            logging.info('Trying to connect to %s' % (self.currentDB))
            self.connect = psycopg2.connect(self.connect_str)
            self.cursor = self.connect.cursor()
        except psycopg2.OperationalError as e:
            logging.error('%s is not open' % (self.currentDB))
            # click.echo(e, file=sys.stderr)
            raise ConnectionError("Cannot connect to the database [%s] @ [%s]:[%s]. Check connection, username, password and database name." % (self.currentDB, self.config['host'], self.config['port']))
        return self

    def execute_sql(self, sql):
        """Run one SQL statement; print rows for SELECT, status (and commit) otherwise.

        Raises SQLSyntaxError on psycopg2.ProgrammingError.
        """
        try:
            self.cursor.execute(sql)
            if SQLParser.is_select(sql): #return records
                colnames = [desc[0] for desc in self.cursor.description]
                print ', '.join(colnames)
                for row in self.cursor.fetchall():
                    print ', '.join(str(e) for e in row)
            else:
                print self.cursor.statusmessage
                self.connect.commit() # commit UPDATE/INSERT messages

        except psycopg2.ProgrammingError:
            raise SQLSyntaxError()

    def refresh_cursor(self):
        """Re-open connection and cursor (recovers a cursor left in error state)."""
        self.connect = psycopg2.connect(self.connect_str)
        self.cursor = self.connect.cursor()


    # schema is a list of tuple of (attribute_name, attribute_type) as string
    def create_dataset(self, inputfile, dataset, schema, header=False, attributes=None):
        """Create the three CVD tables for *dataset* and bulk-load *inputfile*.

        inputfile  -- CSV path relative to orpheus_home (only CSV is supported)
        schema     -- list of (attribute_name, attribute_type) string tuples
        header     -- True if the CSV's first line is a header row
        attributes -- column names to load; required (inference unimplemented)
        Raises DatasetExistsError on duplicate dataset, OperationError otherwise.
        """
        self.refresh_cursor()
        print "Creating the dataset [%s] to the database [%s] ..." % (dataset, self.currentDB)
        # create a schema (in postgres) to store user specific information
        try:
            self.cursor.execute("CREATE SCHEMA IF NOT EXISTS %s ;" % self.user)
            self.cursor.execute("CREATE TABLE IF NOT EXISTS %s (dataset_name text primary key);" % (self.user + '.datasets'))
        except psycopg2.ProgrammingError:
            # this is ok since table has been created before
            self.refresh_cursor()


        try:
            # add current dataset name into user.datasets
            # NOTE(review): value is interpolated, not parameterized
            self.cursor.execute("INSERT INTO %s values('%s');" % (self.user + '.datasets', dataset))
        except psycopg2.IntegrityError: # happens when inserting duplicate key
            raise DatasetExistsError(dataset, self.user)
            return  # unreachable (after raise)

        try:
            # for each dataset, create 3 tables
            # dataset_datatable, which includes all records, rid as PK, based on schema
            # dataset_version, which keep track of all version information, like version
            # dataset_indexTbl, which includes all the vid and rid mapping, like indexTbl


            if '.csv' not in inputfile:
                # TODO: finish other input later
                raise NotImplementedError("Loading other than CSV file not implemented!")
                return  # unreachable (after raise)

            if not attributes:
                raise NotImplementedError("Attributes inferreing not implemented!")
                return  # unreachable (after raise)

            # create cvd into public schema
            #TODO: change to private schema in later version

            print "Creating the data table using the schema provided ..."
            # create datatable
            # NOTE(review): tuple-unpacking lambda below is Python 2-only syntax
            self.cursor.execute("CREATE TABLE %s (rid serial primary key, \
                %s);" % (const.PUBLIC_SCHEMA + dataset + const.DATATABLE_SUFFIX, ",".join(map(lambda (attribute_name, attribute_type) : attribute_name + " " + attribute_type, schema))))

            print "Creating the version table ..."
            # create version table
            self.cursor.execute("CREATE TABLE %s(vid int primary key, \
                author text, \
                num_records int, \
                parent integer[], \
                children integer[], \
                create_time timestamp, \
                commit_time timestamp, \
                commit_msg text);" % (const.PUBLIC_SCHEMA + dataset + const.VERSIONTABLE_SUFFIX))

            print "Creating the index table ..."
            # create indexTbl table
            self.cursor.execute("CREATE TABLE %s (vid int primary key, \
                rlist integer[]);" % (const.PUBLIC_SCHEMA + dataset + const.INDEXTABLE_SUFFIX))

            # dump data into this dataset (server-side COPY: path must be
            # readable by the postgres server process)
            file_path = self.config['orpheus_home'] + inputfile
            if header:
                self.cursor.execute("COPY %s (%s) FROM '%s' DELIMITER ',' CSV HEADER;" % (const.PUBLIC_SCHEMA + dataset + const.DATATABLE_SUFFIX, ",".join(attributes), file_path))
            else:
                self.cursor.execute("COPY %s (%s) FROM '%s' DELIMITER ',' CSV;" % (const.PUBLIC_SCHEMA + dataset + const.DATATABLE_SUFFIX, ",".join(attributes), file_path))


            self.connect.commit()
        except Exception as e:
            # NOTE(review): original error detail is swallowed here
            raise OperationError()
        return

    def drop_dataset(self, dataset):
        """Best-effort drop of the three CVD tables and the datasets entry.

        Each statement failure is absorbed by re-opening the cursor so the
        remaining drops still run.
        """
        self.refresh_cursor()
        # TODO: refactor for better approach?
        try:
            self.cursor.execute("DROP table %s;" % (const.PUBLIC_SCHEMA + dataset + const.DATATABLE_SUFFIX))
            self.connect.commit()
        except:
            self.refresh_cursor()

        try:
            self.cursor.execute("DROP table %s;" % (const.PUBLIC_SCHEMA + dataset + const.VERSIONTABLE_SUFFIX))
            self.connect.commit()
        except:
            self.refresh_cursor()

        try:
            self.cursor.execute("DROP table %s;" % (const.PUBLIC_SCHEMA + dataset + const.INDEXTABLE_SUFFIX))
            self.connect.commit()
        except:
            self.refresh_cursor()
        try:
            self.cursor.execute("DELETE from %s where dataset_name = '%s';" % (self.user + ".datasets", dataset))
            self.connect.commit()
        except:
            self.refresh_cursor()

        self.connect.commit()
        return

    def list_dataset(self):
        """Return the list of dataset names registered for the current user.

        Raises BadStateError when the user's datasets table does not exist yet.
        """
        self.refresh_cursor()
        try:
            self.cursor.execute("SELECT * from %s;" % (self.user + '.datasets'))
            return [x[0] for x in self.cursor.fetchall()]
        except psycopg2.ProgrammingError:
            raise BadStateError("No dataset has been initialized before, try init first")
            return  # unreachable (after raise)

    def show_dataset(self, dataset):
        """Not implemented yet; always raises NotImplementedError."""
        self.refresh_cursor()
        raise NotImplementedError("Show a specified dataset not implemented!")
        return  # unreachable (after raise)


    @classmethod
    def load_config(cls):
        """Load ./config.yaml relative to the current directory.

        Raises BadStateError when the file cannot be read.
        NOTE(review): yaml.load without an explicit Loader is unsafe on
        untrusted input and deprecated in PyYAML >= 5.1.
        """
        try:
            with open('config.yaml', 'r') as f:
                obj = yaml.load(f)
        except IOError:
            raise BadStateError("config.yaml file not found or data not clean, abort")
            return None  # unreachable (after raise)
        return obj

    @classmethod
    def create_user(cls, user, password, db):
        """Create PostgreSQL superuser *user* in database *db* via a one-off connection.

        *password* is currently unused — the user is created without one.
        Raises ConnectionError when the server cannot be reached.
        """
        # Create user in the database
        # Using corresponding SQL or prostegres commands
        # Set one-time only connection to the database to create user
        try:
            server_config = cls.load_config()
            conn_string = "host=" + server_config['host'] + " port=" + str(server_config['port']) + " dbname=" + db
            connect = psycopg2.connect(conn_string)
            cursor = connect.cursor()
            # passphrase = EncryptionTool.passphrase_hash(password)
            cursor.execute("CREATE USER %s SUPERUSER;" % user) # TODO: add password detection later
            connect.commit()
        except psycopg2.OperationalError:
            raise ConnectionError("Cannot connect to %s at %s:%s" % (db, server_config['host'], str(server_config['port'])))
        except Exception as e: # unknown error
            raise e
        return
212 |
213 |
214 |
215 |
--------------------------------------------------------------------------------
/orpheus/clt/click_entry.py:
--------------------------------------------------------------------------------
1 | import os
2 | import yaml
3 | import click
4 |
5 | from orpheus.core.executor import Executor
6 | from orpheus.core.user_control import UserManager
7 | from orpheus.core.orpheus_exceptions import BadStateError, NotImplementedError, BadParametersError
8 | from orpheus.core.orpheus_sqlparse import SQLParser
9 | from db import DatabaseManager
10 |
class Context():
    """Load ``config.yaml`` into ``self.config`` for the CLI commands.

    Ensures ORPHEUS_HOME is set (defaulting to the current directory) and
    normalizes ``orpheus_home`` in the config to end with '/'.
    Raises BadStateError when the config file is missing or malformed.
    """

    def __init__(self):
        self.config_file = 'config.yaml'
        if 'ORPHEUS_HOME' not in os.environ:
            os.environ['ORPHEUS_HOME'] = os.getcwd()
        # os.path.join works whether or not ORPHEUS_HOME ends with '/'
        self.config_path = os.path.join(os.environ['ORPHEUS_HOME'], self.config_file)
        try:
            with open(self.config_path, 'r') as f:
                # NOTE(fix): safe_load replaces the unsafe yaml.load — the
                # config file must never execute arbitrary YAML tags
                self.config = yaml.safe_load(f)

            assert(self.config['orpheus_home'] != None)

            if not self.config['orpheus_home'].endswith("/"):
                self.config['orpheus_home'] += "/"
            # if user overwrote ORPHEUS_HOME, rewrite the environment variable
            if 'orpheus_home' in self.config:
                os.environ['ORPHEUS_HOME'] = self.config['orpheus_home']
        except (IOError, KeyError):
            raise BadStateError("config.yaml file not found or data not clean, abort")
        except AssertionError:
            raise BadStateError("orpheus_home not specified in config.yaml")
        except Exception:  # unknown error
            raise BadStateError("Unknown error during loading the config file, abort")
37 |
38 |
@click.group()
@click.pass_context
def cli(ctx):
    """CLI entry point: load the Orpheus config and the saved user session."""
    try:
        ctx.obj = Context().config  # the Orpheus context dict
        # overlay the persisted user session onto the config
        saved_state = UserManager.get_current_state()
        for key, value in saved_state.items():
            ctx.obj[key] = value
    except Exception as e:
        click.secho(str(e), fg='red')
49 |
@cli.command()
@click.option('--database', prompt='Enter database name', help='Specify the database name that you want to configure to.')
@click.option('--user', prompt='Enter user name', help='Specify the user name that you want to configure to.')
@click.option('--password', prompt=True, hide_input=True, help='Specify the password.', default='')
@click.pass_context
def config(ctx, user, password, database):
    """Configure and persist the database/user session for later commands."""
    newctx = ctx.obj  # default

    try:
        newctx['database'] = database
        newctx['user'] = user
        newctx['passphrase'] = password
        # verify the credentials actually connect before saving anything
        conn = DatabaseManager(newctx)
    except Exception as e:
        click.secho(str(e), fg='red')
        return

    try:
        UserManager.create_user(user, password)
        if UserManager.verify_credential(user, password):
            # NOTE(fix): the original called UserManager.create_user a second
            # time here; one registration is sufficient.
            from orpheus.core.encryption import EncryptionTool
            newctx['passphrase'] = EncryptionTool.passphrase_hash(password)
            UserManager.write_current_state(newctx)  # pass down to user manager
            click.echo('Logged to the database [%s] as [%s] ' % (ctx.obj['database'],ctx.obj['user']))
    except Exception as e:
        click.secho(str(e), fg='red')
77 |
78 |
@cli.command()
@click.pass_context
def create_user(ctx):
    """Register a new user in both the database and the user manager."""
    # requires an active session; also TODO: check that the current user is
    # allowed to create new users
    if not ctx.obj['user'] or not ctx.obj['database']:
        click.secho("No session in use, please call config first", fg='red')
        return  # stop the following commands

    new_user = click.prompt('Please enter user name')
    new_password = click.prompt('Please enter password', hide_input=True, confirmation_prompt=True)

    click.echo("Creating user into database [%s]" % ctx.obj['database'])
    try:
        DatabaseManager.create_user(new_user, new_password, ctx.obj['database']) #TODO: need revise
        UserManager.create_user(new_user, new_password)
        click.echo('User created.')
    except Exception as e:
        click.secho(str(e), fg='red')
98 |
99 | # TODO: check permission?
100 |
@cli.command()
@click.pass_context
def whoami(ctx):
    """Print the database and user of the active session."""
    session = ctx.obj
    if not (session['user'] and session['database']):
        click.secho("No session in use, please call config first", fg='red')
        return  # stop the following commands

    click.echo('Logged to the database [%s] as [%s] ' % (session['database'],session['user']))
109 |
110 |
@cli.command()
@click.argument('input_file', type=click.Path(exists=True))
@click.argument('dataset')
@click.option('--table_name', '-t', help='Create the dataset with existing table schema')
@click.option('--schema', '-s', help='Create the dataset with schema file', type=click.Path(exists=True))
@click.pass_context
def init(ctx, input_file, dataset, table_name, schema):
    """Put a file (or an existing table) under version control as a new CVD."""
    # TODO: add header support
    # Connects to the database chosen in the earlier -config- command.
    # Two cases to take care of:
    #   1. version control on an outside file: load a csv (or other format,
    #      eventually) plus its schema into the DB
    #   2. version control on an existing table in the DB
    runner = Executor(ctx.obj)
    manager = DatabaseManager(ctx.obj)
    runner.exec_init(input_file, dataset, table_name, schema, manager)
129 |
@cli.command()
@click.argument('dataset')
@click.pass_context
def drop(ctx, dataset):
    """Drop a CVD after interactive confirmation."""
    if not click.confirm('Are you sure you want to drop %s?' % dataset):
        return
    try:
        manager = DatabaseManager(ctx.obj)
        click.echo("Dropping dataset [%s] ..." % dataset)
        Executor(ctx.obj).exec_drop(dataset, manager)
    except Exception as e:
        click.secho(str(e), fg='red')
142 |
143 |
@cli.command()
@click.option('--dataset', '-d', help='Specify the dataset to show')
@click.option('--table_name', '-t', help='Specify the table to show')
@click.pass_context
def ls(ctx, dataset, table_name):
    """List the CVDs the current user owns, or show one dataset."""
    # if no dataset specified, show the list of dataset the current user owns
    try:
        conn = DatabaseManager(ctx.obj)
        # NOTE(fix): was a raw Python 2 `print` statement; click.echo matches
        # the rest of this module's output and is Python 3 compatible
        click.echo("The current database contains the following CVDs:")
        if not dataset:
            click.echo("\n".join(conn.list_dataset()))
        else:
            click.echo(conn.show_dataset(dataset))

        # when showing dataset, chop off rid
    except Exception as e:
        click.secho(str(e), fg='red')
161 |
162 |
# the call back function to execute file
# execute line by line
def execute_sql_file(ctx, param, value):
    """Eager click callback for --file: parse each line of a SQL script.

    NOTE(review): each line is parsed but the result is never executed —
    only the commented-out print remains.  Presumably execution was meant
    to follow; confirm before relying on --file.
    """
    if not value or ctx.resilient_parsing:
        return
    # value is the relative path of file
    conn = DatabaseManager(ctx.obj)
    parser = SQLParser(conn)
    abs_path = ctx.obj['orpheus_home'] + value
    click.echo("Executing SQL file at %s" % value)
    with open(abs_path, 'r') as f:
        for line in f:
            executable_sql = parser.parse(line)
            #print executable_sql
    ctx.exit()
178 |
@cli.command()
@click.option('--file', '-f', callback=execute_sql_file, expose_value=False, is_eager=True, type=click.Path(exists=True))
@click.option('--sql', prompt="Input sql statement")
@click.pass_context
def run(ctx, sql):
    """Translate one OrpheusDB SQL statement and execute it."""
    # TODO: add finer grained try-catch for SQLParser
    try:
        manager = DatabaseManager(ctx.obj)
        translated = SQLParser(manager).parse(sql)
        manager.execute_sql(translated)

    except Exception as e:
        import traceback
        traceback.print_exc()
        click.secho(str(e), fg='red')
197 |
@cli.command()
@click.argument('dataset')
@click.option('--vlist', '-v', multiple=True, required=True, help='Specify version you want to checkout, use multiple -v for multiple version checkout')
@click.option('--to_table', '-t', help='Specify the table name to checkout to.')
@click.option('--to_file', '-f', help='Specify the location of file')
@click.option('--delimiters', '-d', default=',', help='Specify the delimiter used for checkout file')
@click.option('--header', '-h', is_flag=True, help="If set, the first line of checkout file will be the header")
@click.option('--ignore/--no-ignore', default=False, help='If set, checkout versions into table will ignore duplicated key')
@click.pass_context
def checkout(ctx, dataset, vlist, to_table, to_file, delimiters, header, ignore):
    """Materialize one or more versions of a CVD into a table or a file."""
    manager = DatabaseManager(ctx.obj)
    runner = Executor(ctx.obj)
    runner.exec_checkout(dataset, vlist, to_table, to_file, delimiters, header, ignore, manager)
211 |
212 |
@cli.command()
@click.option('--msg','-m', help='Commit message', required = True)
@click.option('--table_name','-t', help='The table to be committed') # changed to optional later
@click.option('--file_name', '-f', help='The file to be committed', type=click.Path(exists=True))
@click.option('--delimiters', '-d', default=',', help='Specify the delimiters used for checkout file')
@click.option('--header', '-h', is_flag=True, help="If set, the first line of checkout file will be the header")
@click.pass_context
def commit(ctx, msg, table_name, file_name, delimiters, header):
    """Commit a checked-out table or file back into its CVD."""
    manager = DatabaseManager(ctx.obj)
    Executor(ctx.obj).exec_commit(msg, table_name, file_name, delimiters, header, manager)
225 |
@cli.command()
@click.pass_context
def clean(ctx):
    """Reset the meta_info and meta_modifiedIds bookkeeping files."""
    config = ctx.obj
    # NOTE(fix): the original opened each file twice (once only to truncate)
    # and never closed the writing handle; mode 'w' already truncates and the
    # with-block guarantees the handle is closed.
    with open(config['meta_info'], 'w') as f:
        f.write('{"file_map": {}, "table_map": {}, "table_created_time": {}, "merged_tables": []}')
    click.echo("meta_info cleaned")
    with open(config['meta_modifiedIds'], 'w') as f:
        f.write('{}')
    click.echo("modifiedID cleaned")
240 |
--------------------------------------------------------------------------------
/orpheus/core/orpheus_sqlparse.py:
--------------------------------------------------------------------------------
1 | # this class uses the sqlparse library to extract the semantics of OrpheusDB SQL statement
2 |
3 | import sqlparse, re
4 | from sqlparse.sql import Identifier, Token, Where
5 | from sqlparse.tokens import DML
6 |
7 | import orpheus_const as const
8 | from relation import RelationManager
9 | from collections import defaultdict
10 |
class InvalidSyntaxError(Exception):
    """Raised when an OrpheusDB SQL statement cannot be parsed."""

    def __init__(self, statement):
        self.statement = statement

    def __str__(self):
        return "Error parsing '{0}'".format(self.statement)
16 |
17 |
18 | class SQLParser(object):
19 |
20 | def __init__(self, conn):
21 | self.conn = conn
22 | self.relation = RelationManager(self.conn)
23 | self.reserved_column_names = ['cvd']
24 |
    def construct_identifier(self, content):
        # wrap raw text in a sqlparse Identifier node (single anonymous Token)
        return Identifier([Token('', content)])
27 |
28 | def get_fields_mapping(self, attributes):
29 | # mapping from attribute name to corresponding table
30 | # by default, d = datatable, i = indextable, v = versiontable
31 | fields_mapping = {'vid' : 'i'}
32 | for attribute in attributes:
33 | fields_mapping[attribute] = 'd'
34 |
35 | versiontable_attributes = ["author", "num_records", "parent", "children", "create_time", "commit_time", "commit_msg"]
36 | # take in version table attributes
37 | for version_attribute in versiontable_attributes:
38 | fields_mapping[version_attribute] = 'v'
39 |
40 | return fields_mapping
41 |
42 | def get_touched_table(self, touched_columns, fields_mapping):
43 | touched_table = set()
44 | for column in touched_columns.keys():
45 | try:
46 | touched_table.add(fields_mapping[column])
47 | except KeyError:
48 | pass # user defined alias
49 | return touched_table
50 |
    # anything in this parsed statement
    def get_touched_column_names(self, parent, stop_words=set()):
        """Collect column-name tokens anywhere under *parent*.

        Returns a defaultdict mapping column name -> list of
        (parent_node, token_index) handles so callers can rewrite the tokens
        in place.  NOTE(review): the mutable default ``stop_words=set()`` is
        shared across calls; it is only read here, so this is safe as long
        as no caller mutates the returned-to default.
        """
        tokens = parent.flatten()
        column_names = defaultdict(list)
        for token in tokens:
            if token.ttype == sqlparse.tokens.Name:
                # this is a column
                column_value = token.value
                if column_value not in stop_words:
                    token_parent = token.parent
                    token_index = token_parent.token_index(token)
                    column_names[column_value].append((token_parent, token_index))
        return column_names
64 |
65 | # return replaced from clause
66 | def get_from_clause(self, dataset_name, touched_table):
67 | # rule based !
68 | datatable = dataset_name + const.DATATABLE_SUFFIX
69 | indextable = dataset_name + const.INDEXTABLE_SUFFIX
70 | versiontable = dataset_name + const.VERSIONTABLE_SUFFIX
71 | if 'd' in touched_table and 'i' in touched_table:
72 | return "%s, %s" % (datatable + ' d', indextable + ' i')
73 | elif 'v' in touched_table and 'i' in touched_table:
74 | return "%s, %s" % (versiontable + ' v', indextable + ' i')
75 | elif 'd' in touched_table and len(touched_table) == 1: # meaning there is only datatable attributes are touched
76 | return "%s" % datatable + ' d'
77 | elif 'v' in touched_table and len(touched_table) == 1: # meaning there is only versiontable attributes are touched
78 | return "%s" % versiontable + ' v'
79 | else:
80 | return "%s, %s, %s" % (versiontable + ' v', indextable + ' i', datatable + ' d')
81 |
82 |
83 | def get_where_clause(self, touched_table):
84 | # rule based!
85 | if 'd' in touched_table and 'i' in touched_table:
86 | return "d.rid = ANY(i.rlist)"
87 | else:
88 | return None
89 |
90 |
    # return the first occurrence of versions (1,2), OF cvd (ds1)
    def get_dataset_name_and_versions(self, parent):
        """Locate the first 'VERSION <vlist> OF CVD <name>' clause under *parent*.

        Returns (vlist, dataset_name, handle, version_idx) where *handle* is
        the Parenthesis/Statement node containing the clause and *version_idx*
        the clause's index inside handle.tokens.
        NOTE(review): if no 'version' token exists, *parent* stays None and
        the trailing ``parent.tokens`` access raises AttributeError.
        """
        tokens = list(parent.flatten())
        parent, dataset_name, version_idx, vlist = None, None, None, None
        for i,token in enumerate(tokens):
            if token.value == 'version':
                parent = token.parent
                while type(parent) != sqlparse.sql.Parenthesis and type(parent) != sqlparse.sql.Statement:
                    # stops when we find a handle to either () or statement
                    token = parent
                    parent = parent.parent # traverse up tree
                version_idx = parent.token_index(token)
                break

        # offsets +2 / +6 skip the whitespace/keyword tokens of
        # 'version <vlist> of cvd <name>'
        vlist = parent.tokens[version_idx + 2].value
        dataset_name = parent.tokens[version_idx + 6].value.split()[-1]
        return vlist, dataset_name, parent, version_idx
108 |
# find the first occurrence of CVD, return the name of CVD, its handle and index
def find_cvd_handle(self, parent):
    """Find the first 'cvd <name>' clause under parent.

    Returns (dataset_name, parent, cvd_index): the CVD name, the nearest
    enclosing Parenthesis or Statement node, and the index within its
    .tokens of the subtree containing the 'cvd' keyword. All three are
    None when no 'cvd' token exists.

    NOTE(review): tokens[i + 2] assumes the flattened stream is
    'cvd', whitespace, name -- confirm against the sqlparse tokenizer.
    """
    tokens = list(parent.flatten())
    parent, dataset_name, cvd_index = None, None, None
    for i, token in enumerate(tokens):
        if token.value == 'cvd':
            # found the clause, need to find its parent handle
            parent = token.parent
            dataset_name = tokens[i + 2].value
            while type(parent) != sqlparse.sql.Parenthesis and type(parent) != sqlparse.sql.Statement:
                # stops when we find a handle to either () or statement
                token = parent
                parent = parent.parent  # traverse up tree
            cvd_index = parent.token_index(token)
            break
    return dataset_name, parent, cvd_index
125 |
@classmethod
def is_select(cls, raw_sql):
    """Return True if raw_sql is a SELECT statement.

    Uses token_first() so that leading whitespace or comments do not hide
    the DML keyword -- the original looked at parsed.tokens[0], which is a
    whitespace token for inputs like '  select 1' and misreported False.
    """
    parsed = sqlparse.parse(raw_sql)[0]
    first = parsed.token_first(skip_cm=True)
    return first is not None and first.ttype is DML and first.value.upper() == 'SELECT'
133 |
134 |
135 |
# find the WHERE clause index under parent.tokens
def find_where_index(self, parent):
    """Return the index of the Where token in parent.tokens, or -1 if absent."""
    for idx, tok in enumerate(parent.tokens):
        if type(tok) is sqlparse.sql.Where:
            return idx
    return -1
143 |
# find the position in parent at which a WHERE clause should be inserted
def find_where_insert(self, parent):
    """Return the index in parent.tokens where a WHERE clause belongs.

    The clause must come before any GROUP BY / ORDER BY / LIMIT (one slot
    earlier to account for the separating space token); otherwise it goes
    at the end, stepping back over a trailing ')' or ';'.
    """
    tokens = parent.tokens
    for pos, tok in enumerate(tokens):
        if tok.value in ('group', 'order', 'limit'):
            return pos - 1  # -1 for the space preceding the keyword
    tail_value = tokens[-1].value
    return len(tokens) - 1 if tail_value in (')', ';') else len(tokens)
151 |
152 |
# version known, replace the tokens
def replace_known_version(self, dataset_name, vlist, parent, version_idx):
    """Rewrite 'version <vlist> of cvd <name>' into the physical datatable
    plus a rid-membership filter, mutating the sqlparse tree in place.

    The version list is resolved to concrete record ids through the index
    table; the filter is added as a new WHERE clause, or AND-ed onto the
    existing one.
    """
    datatable = dataset_name + const.DATATABLE_SUFFIX
    indextable = dataset_name + const.INDEXTABLE_SUFFIX
    rlist = self.relation.select_records_of_version_list(vlist.split(','), indextable)
    constraint = "rid = ANY('%s'::int[])" % rlist

    # replace the FROM clause: drop the 7 tokens spanning 'version ... of cvd <name>'
    parent.tokens = parent.tokens[:version_idx] + [self.construct_identifier(datatable)] + parent.tokens[version_idx + 7:]

    # replace / extend the WHERE clause
    where_indx = self.find_where_index(parent)
    if where_indx < 0:
        new_idex = self.find_where_insert(parent)  # find the place to insert new where
        parent.insert_before(new_idex, self.construct_identifier(" where " + constraint))
    else:
        # BUGFIX: this assignment was duplicated on two consecutive lines
        where_token = parent.tokens[where_indx]
        new_idex = self.find_where_insert(where_token)
        where_token.insert_before(new_idex, self.construct_identifier(" and " + constraint))
174 |
175 |
def replace_unknown_version(self, parent, cvd_idx, dataset_name, fields_mapping, touched_column_names):
    """Rewrite a 'from cvd <name>' query (no explicit versions) against the
    physical d/i/v tables, mutating the sqlparse tree in place.

    Picks the tables to join from the columns the query touches, installs
    the join predicate in the WHERE clause when needed, and qualifies every
    bare touched column with its table alias.
    """
    # find which physical tables (d / i / v) the touched columns live in
    touched_table = self.get_touched_table(touched_column_names, fields_mapping)
    table_constraint = self.get_from_clause(dataset_name, touched_table)

    # replace the FROM clause
    # CONSISTENCY FIX: slice parent.tokens explicitly instead of relying on
    # TokenList.__getitem__ via 'parent[cvd_idx+1:]', matching the slicing
    # style used in replace_known_version
    parent.tokens = parent.tokens[:cvd_idx] + [self.construct_identifier(table_constraint)] + parent.tokens[cvd_idx + 1:]

    where_constraint = self.get_where_clause(touched_table)

    # add / extend the WHERE clause when a join predicate is required
    if where_constraint:
        where_indx = self.find_where_index(parent)
        if where_indx < 0:
            # no WHERE yet, insert a fresh one
            new_idex = self.find_where_insert(parent)
            parent.insert_before(new_idex, self.construct_identifier(" where " + where_constraint))
        else:
            where_token = parent.tokens[where_indx]
            # NOTE(review): extend() splices the identifier's subtokens into the
            # WHERE node; str() output is unchanged vs. appending -- confirm if
            # any caller inspects tree structure rather than rendered text
            where_token.tokens.extend(self.construct_identifier(" and " + where_constraint + " "))

    # qualify every touched bare column with its table alias
    for column in touched_column_names.keys():
        for (column_parent, column_idx) in touched_column_names[column]:
            if column in fields_mapping:  # only columns found in the tables
                if '.' in column_parent.value:
                    continue  # already alias-qualified
                mapped_table_alias = fields_mapping[column]
                column_parent.tokens = column_parent.tokens[:column_idx] + [self.construct_identifier("%s.%s" % (mapped_table_alias, column))] + column_parent.tokens[column_idx + 1:]
208 |
209 |
210 |
# main method to parse
# all chars cast to lower case
def parse(self, raw_sql):
    """Rewrite OrpheusDB SQL extensions into plain SQL.

    Repeatedly translates, until none remain:
      1. 'FROM VERSION v1,v2 OF CVD ds' -> datatable + rid-list WHERE filter
      2. 'FROM CVD ds'                  -> joined physical tables

    The input is lower-cased first. Raises InvalidSyntaxError (after
    printing the traceback) on any failure during rewriting.
    """
    line = raw_sql.lower()
    # Compiled once, outside the loop. Raw strings: \s and \d are regex
    # escapes, not string escapes (the original non-raw literals relied on
    # Python passing unknown escapes through).
    # TODO: add more cases?
    version_specified_re = re.compile(r'.*?from\sversion\s(\d+|\d+(,\d+)+)\sof\scvd\s(\w+);?')
    version_unknown_re = re.compile(r'.*from\scvd\s(\w+);?')
    try:
        while 1:
            # case 1: explicit version list given
            version_matched = version_specified_re.match(line)
            if version_matched:
                parsed_statement = sqlparse.parse(line)[0]
                vlist, dataset_name, parent, version_idx = self.get_dataset_name_and_versions(parsed_statement)
                self.replace_known_version(dataset_name, vlist, parent, version_idx)
                line = str(parsed_statement)
                continue

            # case 2: query over the whole CVD, versions unknown
            version_unknown_matched = version_unknown_re.match(line)
            if version_unknown_matched:
                parsed_statement = sqlparse.parse(line)[0]
                dataset_name, parent, cvd_idx = self.find_cvd_handle(parsed_statement)

                datatable_attributes, _ = self.relation.get_datatable_attribute(dataset_name + const.DATATABLE_SUFFIX)

                # map each datatable field to its table alias
                fields_mapping = self.get_fields_mapping(datatable_attributes)

                touched_column_names = self.get_touched_column_names(parent, stop_words=set(self.reserved_column_names + [dataset_name]))

                self.replace_unknown_version(parent, cvd_idx, dataset_name, fields_mapping, touched_column_names)

                line = str(parsed_statement)
                continue

            # either no keyword found or all resolved
            break
        return line
    except Exception:  # narrowed from a bare except: (no longer eats KeyboardInterrupt)
        import traceback
        traceback.print_exc()
        raise InvalidSyntaxError(raw_sql)
--------------------------------------------------------------------------------
/orpheus/interface/main/templates/main/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 | {% load static %}
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
OrpheusDB Demonstration
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 |
25 |
26 |
27 |
28 |
29 |
30 |
34 |
35 |
36 |
37 |
38 |
59 |
60 |
61 |
62 |
98 |
99 |
100 |
110 |
111 |
112 |
113 |
114 |
115 | {% if messages %}
116 |
117 | {% for message in messages %}
118 |
119 |
124 |
125 |
126 | {% if message.tags == 'success' %}
127 |
{{ message| linebreaks }}
128 | {% else %}
129 |
{{ message| linebreaks }}
130 | {% endif %}
131 |
132 | {% endfor %}
133 |
134 | {% endif %}
135 |
136 | {% for attr_names, transactions in table_list %}
137 |
138 | {% if attr_names %}
139 |
140 |
141 | {% for attr in attr_names %}
142 | | {{attr}} |
143 | {% endfor %}
144 |
145 |
146 | {% endif %}
147 |
148 | {% if transactions %}
149 |
150 | {% for row in transactions %}
151 |
152 | {% for col in row %}
153 | | {{col}} |
154 | {% endfor %}
155 |
156 | {% endfor %}
157 |
158 | {% endif %}
159 |
160 |
161 | {% endfor %}
162 |
163 |
164 |
165 |
166 |
217 |
218 |
219 |
221 |
222 |
223 |
224 |
225 |
226 |
227 |
228 |
229 |
230 |
231 |
232 |
233 |
234 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # OrpheusDB: Bolt-On Versioning for Relational Databases
2 | [OrpheusDB][orpheus] is a hosted system that supports _relational dataset version management_. OrpheusDB is built on top of standard relational databases, thus it inherits much of the same benefits of relational databases, while also compactly storing, tracking, and recreating versions on demand, all very efficiently.
3 |
4 | OrpheusDB is built using [PostgreSQL][postgressite] and [Click][clicksite], a command line tool written in Python. Our current version supports advanced querying capabilities, using both the git-style version control commands, as well as SQL queries on one or more dataset versions. The paper describing the design, functionality, optimization, and performance evaluation can be found [at this link][papersite].
5 |
6 | OrpheusDB is a multi-year project, supported by the National Science Foundation via award number 1513407. It shares the vision of the [DataHub][datahub] project in supporting collaborative data analytics.
7 |
8 |
9 |
10 |
11 |
12 | ### Version
13 | The current version is 1.0.0 (Released January 1, 2017).
14 |
15 |
16 | ### Key Design Innovations
17 | * OrpheusDB is built on top of a traditional relational database, thus it inherits all of the standard benefits of relational database systems "for free"
18 | * OrpheusDB supports advanced querying and versioning capabilities, via both SQL queries and git-style version control commands.
18 | * OrpheusDB uses a sophisticated data model, coupled with partition optimization algorithms¹, to provide efficient version control performance over large-scale datasets.
20 |
21 | ### Dataset Version Control in OrpheusDB
22 | The fundamental unit of storage within OrpheusDB is a _collaborative versioned dataset (CVD)_ to which one or more users can contribute,
23 | representing a collection of versions of a single relational dataset, with a fixed schema. There is a many-to-many relationship between records in the relation and versions that are captured within the CVD: each record can belong to many versions, and each version can contain many records.
24 |
25 |
26 | Users can operate on CVDs much like they would with source code version control. The _checkout_ command allows users to materialize one or more specific versions of a CVD as a newly created regular table within a relational database or as a csv file; the _commit_ command allows users to add a new version to a CVD by making the local changes made by the user on their materialized table or on their exported csv file visible to others. Other git-style commands we support include _init_, _create\_user_, _config_, _whoami_, _ls_, _drop_, and _optimize_.
27 |
28 | Users can also execute SQL queries on one or more relational dataset versions within a CVD via the command line using the _run_ command, without requiring the corresponding dataset versions to be materialized. Beyond executing queries on a small number of versions, users can also apply aggregation grouped by version ids, or identify versions that satisfy some property.
29 |
30 |
31 |
32 | ### Data Model
33 | Each CVD in OrpheusDB corresponds to three underlying relational tables: the _data_ table, the _index_ table, and the _version_ table. To capture dataset versions, we represent the records of a dataset in the _data_ table and mapping between versions and records in the _index_ table. Finally, we store version-level provenance information in the _version_ table, including attributes such as `author`, `num_records`, `parent`, `children`, `create_time`, `commit_time`, and `commit_msg`.
34 |
35 |
36 |
37 | Our experimental evaluation demonstrates that, compared to other alternative data models, our data model, coupled with the partition optimizer results in **10x** less storage consumption, **1000x** less time for _commit_ and comparable query performance for the _checkout_ command. In other words, OrpheusDB achieves an efficient balance between storage consumption and query latencies.
38 |
39 | ### System Requirement
40 | OrpheusDB requires the following software to be installed successfully prior to setup:
41 | * Python 2.7.x
42 | * PostgreSQL >= 9.5
43 |
44 | ### Installation Instructions
45 | OrpheusDB comes with a standard `setup.py` script for installation. The required python dependency packages include
46 | * click >= 6.6
47 | * psycopg2 >= 2.6.2
48 | * pandas >= 0.19.0
49 | * pyyaml >= 3.12
50 | * pyparsing >=2.1.1
51 | * sqlparse >= 0.2.2
52 |
53 | Users are able to install any of missing dependencies themselves via `pip`. Alternatively, an easier way to install all the requisite dependencies is via `pip install .` (If you encounter permission errors, install via `sudo -H pip install .`)
54 |
55 | After installation, users can use `orpheus --help` to list all the available commands in OrpheusDB. By default, `orpheus` is the alias for OrpheusDB user interface.
56 |
57 |
63 |
64 | ### Configuration
65 | To start with, users need to install PostgresSQL successfully. (A tutorial of installing PostgresSQL on Mac OSX can be found [here][postgres-installation].) After installing, and then starting PostgresSQL (e.g., via `pg_ctl`), users can call `createdb` to create a new database with a new username and password, all under the current user login. Remember the username and password, the parameters of the new database, and other details of the PostgreSQL setup. Once the configuration is complete, edit the appropriate entries in the file `config.yaml`.
66 |
67 | ### User Tutorials
68 | To start working on versioned datasets, users need to run `orpheus config` to set up OrpheusDB for the given user. To start off, use the same username that was used during the PostgreSQL configuration -- this will initialize an OrpheusDB user with the same username. Following that, users can create new OrpheusDB usernames via the `create_user` command. Upon finishing, this new username will be pushed to the underlying data storage with a SUPERUSER privilege. Command `config` can also be used to login through created username and `whoami` is used to list the current username that is currently logged in.
69 |
70 | Please note here that OrpheusDB provides the most basic implementation for user information, i.e. there is no password protection. However, this feature is subject to change in future versions.
71 | ```
72 | orpheus config
73 | orpheus create_user
74 | orpheus whoami
75 | ```
76 |
77 | The `init` command provides a mechanism to load a csv file into OrpheusDB as a CVD, with all the records as its first version (i.e., vid = 1). To let OrpheusDB know what is the schema for this dataset, user can provide a sample schema file through option `-s`. Each line in the schema file has the format `<attribute_name>, <attribute_type>`. In the following example, `data.csv` file contains 3 attributes, namely `age`, `employee_id` and `salary`. The command below loads the `data.csv` file into OrpheusDB as a CVD named `dataset1`, whose schema is indicated in the file `sample_schema.csv`.
78 |
79 |
80 |
81 | ```
82 | orpheus init test/data.csv dataset1 -s test/sample_schema.csv
83 | ```
84 |
85 | User can checkout one or more desired versions through the `checkout` command, to either a csv file or a structured table in RDBMS. In the following example, version 1 of CVD `dataset1` is checked out as a csv file named `checkout.csv`.
86 | ```
87 | orpheus checkout dataset1 -v 1 -f checkout.csv
88 | ```
89 |
90 | After changes are made to the previous checkout versions, OrpheusDB can commit these changes to its corresponding CVD assuming that the schema is unchanged.
91 |
92 | In the following example, we commit the modified checkout.csv back to CVD `dataset1`. Note here that since OrpheusDB internally logged the CVD name that `checkout.csv` file was checked out from, there is no need to specify the CVD name in the `commit` command.
93 |
94 | Any changed or new records from commit file will be appended to the corresponding CVD, labeled with a new version id. A special case is the committing of a subset of a previously checked-out version. In such a setting, OrpheusDB will perform the commit as expected; the new version is added with the subset of the records.
95 |
96 | ```
97 | orpheus commit -f checkout.csv -m 'first commit'
98 | ```
99 |
100 | OrpheusDB also supports direct execution of queries on CVDs without materialization. This is done via the run command. The run command will prompt the user to provide the SQL command to be executed directly. If `-f` is specified, it will execute the SQL file specified.
101 | ```
102 | orpheus run
103 | ```
104 |
105 | OrpheusDB supports a rich syntax of SQL statements on versions and CVDs. During the execution of these statements, OrpheusDB will detect keywords like `CVD` so it knows the query is against one or more CVDs. There are mainly the following two types of queries supported:
106 |
107 | 1. Query against known version(s) of a particular dataset
108 | 2. Query against unknown version(s) of a particular dataset
109 |
110 | To query against known version(s), the version number needs to be specified. In the following example, OrpheusDB will select the `employee_id` and `age` columns from CVD `dataset1` whose version id is equal to either `1` or `2`.
111 | ```
112 | SELECT employee_id, age FROM VERSION 1,2 OF CVD dataset1;
113 | ```
114 |
115 | If version number is unknown, OrpheusDB supports queries where the desired version numbers are also identified. In the following examples, OrpheusDB will select all the version ids that have one or more records whose age is less than 25. It is worth noting that the `GROUP BY` clause is required to aggregate on version numbers.
116 | ```
117 | SELECT vid FROM CVD dataset1 WHERE age < 25 GROUP BY vid;
118 | ```
119 | Here are a couple other examples of SQL on versions:
120 |
121 | (1). Find all versions in CVD `dataset1` that have more than 5 records where salary is larger than 7400.
122 | ```
123 | SELECT vid FROM CVD dataset1 WHERE salary > 7400 GROUP BY vid HAVING COUNT(employee_id) > 5;
124 | ```
125 | (2). Find all versions in CVD `dataset1` whose commit time is later than December 1st, 2016.
126 | ```
127 | SELECT vid FROM CVD dataset1 WHERE commit_time > '2016-12-01' GROUP BY vid;
128 | ```
129 |
130 | ### Development Plan
131 | We plan to release versions of OrpheusDB in a regular manner, adding on further
132 | querying, partitioning, and query optimization capabilities, as well as regular bug-fixes.
133 |
134 | License
135 | ----
136 |
137 | MIT
138 |
139 | [//]: # (These are reference links used in the body of this note and get stripped out when the markdown processor does its job. There is no need to format nicely because it shouldn't be seen. Thanks SO - http://stackoverflow.com/questions/4823468/store-comments-in-markdown-syntax)
140 |
141 | [prof]: http://web.engr.illinois.edu/~adityagp/#
142 | [clicksite]: http://click.pocoo.org/5/
143 | [orpheus]: http://orpheus-db.github.io/
144 | [datahub]: https://arxiv.org/abs/1409.0798
145 | [postgressite]: https://www.postgresql.org/
146 | [papersite]:http://data-people.cs.illinois.edu/papers/orpheus.pdf
147 | [postgres-installation]: https://chartio.com/resources/tutorials/how-to-start-postgresql-server-on-mac-os-x/
148 | ¹ The partition optimization algorithms are not part of this release.
149 |
--------------------------------------------------------------------------------
/orpheus/core/relation.py:
--------------------------------------------------------------------------------
class RelationNotExistError(Exception):
    """Raised when a referenced relation is missing from the database."""

    def __init__(self, tablename):
        self.name = tablename

    def __str__(self):
        return "Relation {} does not exist".format(self.name)
6 |
class RelationOverwriteError(Exception):
    """Raised when a destination relation exists and overwrite was not requested."""

    def __init__(self, tablename):
        self.name = tablename

    def __str__(self):
        return "Relation {} exists, add flag to allow overwrite".format(self.name)
12 |
class ReservedRelationError(Exception):
    """Raised when a requested relation name collides with a reserved name."""

    def __init__(self, tablename):
        self.name = tablename

    def __str__(self):
        return "Relation {} is a reserved name, please use a different one".format(self.name)
18 |
class ColumnNotExistError(Exception):
    """Raised when a referenced column is absent from the relation's schema."""

    def __init__(self, column):
        self.name = column

    def __str__(self):
        return "Column {} does not exist".format(self.name)
24 |
class RelationManager(object):
    """SQL helper layer for the physical tables backing a CVD.

    `conn` is expected to expose `cursor` (a shared DB-API cursor) and
    `connect` (the underlying connection, used for commits).

    NOTE(review): SQL here is built by string interpolation of table and
    column names; these come from internal callers, but parameterization
    should be considered if any path accepts user-controlled names.
    """

    def __init__(self, conn):
        self.conn = conn  # removed stray trailing semicolon

    def get_datatable_attribute(self, from_table):
        """Return ([column_name], [data_type]) for from_table, excluding rid."""
        selectTemplate = "SELECT column_name, data_type from INFORMATION_SCHEMA.COLUMNS where table_name = '%s' and column_name NOT IN ('rid');" % (from_table)
        self.conn.cursor.execute(selectTemplate)
        _datatable_attribute_types = self.conn.cursor.fetchall()
        # column name
        _attributes = map(lambda x: str(x[0]), _datatable_attribute_types)
        # data type
        _attributes_type = map(lambda x: str(x[1]), _datatable_attribute_types)
        return _attributes, _attributes_type

    def checkout_data_print(self, vlist, datatable, indextable, projection='*', where=None):
        """Fetch records of the given versions for display.

        Returns (attribute_names, rows); rid is never exposed. `where` is an
        iterable of SQL fragments joined verbatim onto the predicate.
        Raises RelationNotExistError if datatable is missing.
        """
        if not self.check_table_exists(datatable):
            raise RelationNotExistError(datatable)  # dead 'return' after raise removed
        # user can only see everything except rid
        _attributes, _attributes_type = self.get_datatable_attribute(datatable)
        recordlist = self.select_records_of_version_list(vlist, indextable)
        if projection != '*':
            _attributes = projection.split(',')
        if where:
            sql = "SELECT %s FROM %s WHERE rid = ANY('%s'::int[]) AND %s;" % (",".join(_attributes), datatable, recordlist, "".join(where))
        else:
            sql = "SELECT %s FROM %s WHERE rid = ANY('%s'::int[]);" % (",".join(_attributes), datatable, recordlist)
        self.conn.cursor.execute(sql)
        return _attributes, self.conn.cursor.fetchall()

    def checkout_meta_print(self, versiontable, projection='*', where=None):
        """Fetch version-table metadata for display.

        `where` is a (column, operator, value) triple; text columns get the
        value quoted. Returns (attribute_names, rows).
        """
        if not self.check_table_exists(versiontable):
            # BUGFIX: original raised RelationNotExistError(datatable), but no
            # 'datatable' name exists in this scope (NameError at runtime)
            raise RelationNotExistError(versiontable)
        _attributes, _attributes_type = self.get_datatable_attribute(versiontable)

        # map each version-table attribute to its SQL type so the where
        # clause can quote text values correctly
        version_type_map = {}
        for (a, b) in zip(_attributes, _attributes_type):
            version_type_map[a] = b

        if where:
            try:
                where_type = version_type_map[where[0]]  # the attribute to select on
            except KeyError:
                raise ColumnNotExistError(where[0])
            # text comparisons need the right-hand value quoted
            where_clause = (where[0] + where[1] + "'%s'" % where[2]) if where_type == 'text' else "".join(where)
            sql = "SELECT %s from %s WHERE %s;" % (projection, versiontable, where_clause)
        else:
            sql = "SELECT %s from %s;" % (projection, versiontable)
        self.conn.cursor.execute(sql)
        return _attributes, self.conn.cursor.fetchall()

    # to_file needs an absolute path
    def checkout(self, vlist, datatable, indextable, to_table=None, to_file=None, delimiters=',', header=False, ignore=False):
        """Materialize versions `vlist` of a CVD into a table and/or csv file.

        With ignore=True an existing destination table is dropped first.
        Raises ReservedRelationError / RelationOverwriteError /
        RelationNotExistError on the corresponding sanity-check failures.
        """
        # sanity check
        if to_table:
            if RelationManager.reserve_table_check(to_table):
                raise ReservedRelationError(to_table)
            if self.check_table_exists(to_table):  # destination already present
                if ignore:
                    self.drop_table_force(to_table)
                else:
                    raise RelationOverwriteError(to_table)

        if not self.check_table_exists(datatable):
            raise RelationNotExistError(datatable)

        _attributes, _attributes_type = self.get_datatable_attribute(datatable)
        recordlist = self.select_records_of_version_list(vlist, indextable)
        if to_table:
            self.checkout_table(_attributes, recordlist, datatable, to_table, ignore)
        if to_file:
            self.checkout_file(_attributes, recordlist, datatable, to_file, delimiters, header)

        self.conn.connect.commit()

    def checkout_file(self, attributes, ridlist, datatable, to_file, delimiters, header):
        """Export the selected records to a csv file via a temporary table."""
        # materialize into tmp_table first, then COPY it out
        self.drop_table_force('tmp_table')
        self.checkout_table(attributes, ridlist, datatable, 'tmp_table', None)
        sql = "COPY %s (%s) TO '%s' DELIMITER '%s' CSV HEADER;" if header else "COPY %s (%s) TO '%s' DELIMITER '%s' CSV;"
        sql = sql % ('tmp_table', ','.join(attributes), to_file, delimiters)
        self.conn.cursor.execute(sql)

    # Select the records into a new table
    def checkout_table(self, attributes, ridlist, datatable, to_table, ignore):
        """SELECT ... INTO to_table the records whose rid is in ridlist."""
        if not ignore:
            sql = "SELECT %s INTO %s FROM %s WHERE rid = ANY('%s'::int[]);" \
                  % (', '.join(attributes), to_table, datatable, ridlist)
        else:
            # TODO: the ignore branch is meant to use the primary key; for now
            # it behaves identically to the plain branch
            self.get_primary_key(datatable)
            sql = "SELECT %s INTO %s FROM %s WHERE rid = ANY('%s'::int[]);" \
                  % (', '.join(attributes), to_table, datatable, ridlist)
        self.conn.cursor.execute(sql)

    def drop_table(self, table_name):
        """Drop table_name; raise RelationNotExistError if it is absent."""
        if not self.check_table_exists(table_name):
            raise RelationNotExistError(table_name)
        drop_sql = "DROP TABLE %s" % table_name
        self.conn.cursor.execute(drop_sql)
        self.conn.connect.commit()

    def drop_table_force(self, table_name):
        """Drop table_name if it exists; silently no-op otherwise."""
        if not self.check_table_exists(table_name):
            return
        drop_sql = "DROP TABLE %s" % table_name
        self.conn.cursor.execute(drop_sql)
        self.conn.connect.commit()

    def select_all_rid(self, table_name):
        """Return every rid in table_name as a flat list."""
        select_sql = "SELECT rid from %s;" % table_name
        self.conn.cursor.execute(select_sql)
        return [x[0] for x in self.conn.cursor.fetchall()]

    def generate_complement_sql(self, table1, view_name, attributes=None):
        """Return SQL computing table1 EXCEPT view_name (optionally projected)."""
        if not attributes:
            sql = "TABLE %s EXCEPT TABLE %s" % (table1, view_name)
        else:
            sql = "(SELECT %s from %s) EXCEPT (SELECT %s from %s)" % (','.join(attributes), table1, ','.join(attributes), view_name)
        return sql

    def create_parent_view(self, datatable, indextable, parent_vlist, view_name):
        """Create a view joining datatable records to the given parent versions."""
        plist = ",".join(parent_vlist)
        sql = "CREATE VIEW %s AS \
               SELECT * FROM %s INNER JOIN %s ON rid = ANY(rlist) \
               WHERE vid = ANY(ARRAY[%s]);" % (view_name, datatable, indextable, plist)
        self.conn.cursor.execute(sql)

    def drop_view(self, view_name):
        """Drop view_name if it exists."""
        sql = "DROP VIEW IF EXISTS %s;" % view_name
        self.conn.cursor.execute(sql)

    def select_intersection_table(self, table1, view_name, join_attributes, projection='rid'):
        """Return `projection` from view_name for rows matching table1 on join_attributes."""
        # e.g. SELECT rid FROM tmp_table INNER JOIN ds1_datatable ON tmp_table.employee_id=ds1_datatable.employee_id;
        join_clause = " AND ".join(["%s.%s=%s.%s" % (table1, attr, view_name, attr) for attr in join_attributes])
        sql = "SELECT %s.%s FROM %s INNER JOIN %s on %s;" % (view_name, projection, table1, view_name, join_clause)
        self.conn.cursor.execute(sql)
        return self.conn.cursor.fetchall()

    def convert_csv_to_table(self, file_path, destination_table, attributes, delimiters=',', header=False):
        """Bulk-load file_path into destination_table via COPY."""
        sql = "COPY %s (%s) FROM '%s' DELIMITER '%s' CSV HEADER;" % (destination_table, ",".join(attributes), file_path, delimiters) if header \
            else "COPY %s (%s) FROM '%s' DELIMITER '%s' CSV;" % (destination_table, ",".join(attributes), file_path, delimiters)
        self.conn.cursor.execute(sql)
        self.conn.connect.commit()

    def create_relation(self, table_name):
        """Not implemented yet; placeholder for a CREATE TABLE path."""
        # print() form is valid in both Python 2 and 3
        print("create_relation: Under Construction.")

    # will drop existing table to create the new table
    def create_relation_force(self, table_name, sample_table, sample_table_attributes=None):
        """(Re)create table_name empty, copying its schema from sample_table."""
        if self.check_table_exists(table_name):
            self.drop_table(table_name)
        if not sample_table_attributes:
            sample_table_attributes, _ = self.get_datatable_attribute(sample_table)
        # WHERE 1=2 copies column definitions but no rows
        sql = "CREATE TABLE %s AS SELECT %s FROM %s WHERE 1=2;" % (table_name, ",".join(sample_table_attributes), sample_table)
        self.conn.cursor.execute(sql)
        self.conn.connect.commit()

    def check_table_exists(self, table_name):
        """Return True if table_name exists according to information_schema."""
        sql = "SELECT EXISTS (" \
              "SELECT 1 " \
              "FROM information_schema.tables " \
              "WHERE table_name = '%s');" % table_name
        self.conn.cursor.execute(sql)
        result = self.conn.cursor.fetchall()
        return result[0][0]

    def update_datatable(self, datatable_name, sql):
        """Append rows produced by `sql` to the datatable; return the new rids."""
        _attributes, _attributes_type = self.get_datatable_attribute(datatable_name)
        sql = "INSERT INTO %s (%s) %s RETURNING rid;" % (datatable_name, ', '.join(_attributes), sql)
        self.conn.cursor.execute(sql)
        new_rids = [t[0] for t in self.conn.cursor.fetchall()]
        self.conn.connect.commit()
        return new_rids

    def clean(self):
        """Not implemented yet; placeholder for cleanup logic."""
        print("Clean: Under Construction.")

    @staticmethod
    def reserve_table_check(name):
        '''
        @summary: check if name is reserved
        @param name: name to be checked
        @result: return True if it is reserved
        '''
        return '_datatable' in name or '_indexTbl' in name or '_version' in name or 'orpheus' in name

    def select_records_of_version_list(self, vlist, indextable):
        """Return the union of rids of the given versions as a '{1,2,...}' literal."""
        targetv = ','.join(vlist)
        sql = "SELECT distinct rlist FROM %s WHERE vid = ANY(ARRAY[%s]);" % (indextable, targetv)
        self.conn.cursor.execute(sql)
        data = [','.join(map(str, x[0])) for x in self.conn.cursor.fetchall()]
        return '{' + ','.join(data) + '}'

    def get_primary_key(self, tablename):
        """Return (attname, data_type) rows for tablename's primary key.

        Originally fetched the rows and discarded them; now returns them so
        callers can actually use the result (previous callers ignored the
        None return, so this is backward compatible).
        """
        sql = "SELECT a.attname, format_type(a.atttypid, a.atttypmod) AS data_type FROM pg_index i " \
              "JOIN pg_attribute a ON a.attrelid = i.indrelid " \
              "AND a.attnum = ANY(i.indkey)" \
              "WHERE i.indrelid = '%s'::regclass " \
              "AND i.indisprimary;" % tablename
        self.conn.cursor.execute(sql)
        return self.conn.cursor.fetchall()

    def get_number_of_rows(self, tablename):
        """Return COUNT(*) for tablename."""
        sql = "SELECT COUNT (*) from %s" % tablename
        self.conn.cursor.execute(sql)
        result = self.conn.cursor.fetchall()
        return result[0][0]
--------------------------------------------------------------------------------
/orpheus/core/executor.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import user
3 | import json
4 | import pandas as pd
5 | import os
6 | import sqlparse
7 |
8 | from db import DatasetExistsError
9 | from relation import RelationManager, RelationNotExistError, RelationOverwriteError, ReservedRelationError
10 | from orpheus_exceptions import BadStateError, NotImplementedError, BadParametersError
11 | from orpheus_sqlparse import SQLParser
12 | from django.contrib import messages
13 | from orpheus.core.vgraph import VersionGraph
14 | from access import AccessManager
15 | from version import VersionManager
16 | from metadata import MetadataManager
17 | from user_control import UserManager
18 | from orpheus_schema_parser import Parser as SimpleSchemaParser
19 | from helper import Print
20 | import orpheus_const as const
21 |
class Executor(object):
    """Implements the top-level OrpheusDB operations (init, drop, checkout,
    commit, run, explain, show, restore) against a database connection.

    Error-handling convention: user-facing problems are reported through
    ``self.p`` (a ``Print`` wrapper around Django messages) and the original
    exception is re-raised so callers can abort the current request.
    """

    def __init__(self, config, request = False):
        # config: dict of Orpheus settings (paths, user, ...);
        # request: Django request object, or False when run outside the web UI.
        self.config = config
        self.request = request
        self.p = Print(request)

    def exec_init(self, input_file, dataset, table, schema, conn):
        """Create a new CVD ``dataset`` from ``input_file``.

        The schema comes either from an existing ``table`` or from a
        ``schema`` file -- exactly one of the two must be given.
        """
        try:
            rel = RelationManager(conn)

            if (not table and not schema) or (table and schema):
                raise BadParametersError("Need either (not both) a table or a schema file")

            # Schema paths are resolved relative to orpheus_home unless absolute.
            abs_path = self.config['orpheus_home'] + schema if schema and schema[0] != '/' else schema

            if table:
                attribute_name, attribute_type = rel.get_datatable_attribute(table)
            else:
                attribute_name, attribute_type = SimpleSchemaParser.get_attribute_from_file(abs_path)
        except Exception as e:
            import traceback
            traceback.print_exc()
            self.p.perror(str(e))
            raise  # re-raise the original exception instead of a bare Exception

        # at this point, we have a valid conn obj and rel obj
        try:
            # schema of the dataset, of the type (name, type)
            schema_tuple = zip(attribute_name, attribute_type)
            # create new dataset
            conn.create_dataset(input_file, dataset, schema_tuple, attributes=attribute_name)
            # get all rids in list
            lis_rid = rel.select_all_rid(const.PUBLIC_SCHEMA + dataset + const.DATATABLE_SUFFIX)
            # init version info
            version = VersionManager(conn, self.request)

            version.init_version_graph_dataset(dataset, lis_rid, self.config['user'])
            version.init_index_table_dataset(dataset, lis_rid)
        except DatasetExistsError as e:
            self.p.perror(str(e))
            return
        except Exception as e:
            # revert back to the state before create
            conn.drop_dataset(dataset)
            self.p.perror(str(e))
            return

        graph = VersionGraph(self.config, self.request)
        try:
            graph.init_vGraph_json(dataset, 1) # init vid = 1
        except Exception:
            # roll back the partially written version-graph file
            graph.delete_vGraph_json(dataset)
            raise
        self.p.pmessage("Dataset [%s] has been created successful" % dataset)

    def exec_drop(self, dataset, conn):
        """Drop the CVD ``dataset`` and delete its on-disk version graph."""
        # TODO: add a popup window to confirm
        # E.g. if click.confirm('Are you sure you want to drop %s?' % dataset):
        try:
            conn.drop_dataset(dataset)
            self.p.pmessage("Dataset [%s] has been dropped." % dataset)
        except Exception as e:
            self.p.perror(str(e))
            raise

        graph = VersionGraph(self.config, self.request)
        graph.delete_vGraph_json(dataset)

    def exec_checkout(self, dataset, vlist, to_table, to_file, delimiters, header, ignore, conn):
        """Materialize versions ``vlist`` of ``dataset`` into a table or a file."""
        if not to_table and not to_file:
            self.p.perror(str(BadParametersError("Need a destination, either a table (-t) or a file (-f)")))
            return
        try:
            relation = RelationManager(conn)
        except Exception as e:
            self.p.perror(str(e))
            raise
        # File paths are resolved relative to orpheus_home unless absolute.
        abs_path = self.config['orpheus_home'] + to_file if to_file and to_file[0] != '/' else to_file
        try:
            metadata = MetadataManager(self.config, self.request)
            meta_obj = metadata.load_meta()
            datatable = dataset + const.DATATABLE_SUFFIX
            indextable = dataset + const.INDEXTABLE_SUFFIX
            relation.checkout(vlist, datatable, indextable, to_table=to_table, to_file=abs_path, delimiters=delimiters, header=header, ignore=ignore)
            # update meta info
            AccessManager.grant_access(to_table, conn.user)
            metadata.update(to_table, abs_path, dataset, vlist, meta_obj)
            metadata.commit_meta(meta_obj)
            if to_table:
                self.p.pmessage("Table %s has been cloned from version %s" % (to_table, ",".join(vlist)))
            if to_file:
                self.p.pmessage("File %s has been cloned from version %s" % (to_file, ",".join(vlist)))
        except Exception as e:
            # Clean up a partially created destination table, unless checkout
            # failed because the destination itself was invalid/occupied.
            # BUG FIX: the original tested `not (RelationOverwriteError or
            # ReservedRelationError)`, which is always False (classes are
            # truthy), so the cleanup never ran; inspect the caught exception.
            if to_table and not isinstance(e, (RelationOverwriteError, ReservedRelationError)):
                relation.drop_table(to_table)
            if to_file:
                pass # delete the file
            self.p.perror(str(e))
            raise

    def exec_commit(self, message, table_name, file_name, delimiters, header, conn):
        """Commit a table or a CSV file back into its parent CVD.

        Returns ``(parent_name, curt_vid, parent_list)`` on success, where
        ``curt_vid`` is the id of the newly created version.
        """
        # sanity check
        if not table_name and not file_name:
            self.p.perror(str(BadParametersError("Need a source, either a table (-t) or a file (-f)")))
            return

        if table_name and file_name:
            self.p.perror(str(NotImplementedError("Can either commit a file or a table at a time")))
            return

        # Remember which kind of source is being committed; table_name is
        # later overwritten with 'tmp_table' for file commits, so it cannot
        # be used to tell the two cases apart afterwards. (BUG FIX: the
        # original reused table_name and therefore never took the file_map
        # branch when updating parent ids.)
        is_file_commit = bool(file_name)

        try:
            relation = RelationManager(conn)
            metadata = MetadataManager(self.config, self.request)
            version = VersionManager(conn, self.request)
        except Exception as e:
            self.p.perror(str(e))
            raise
        if table_name and not relation.check_table_exists(table_name):
            self.p.perror(str(RelationNotExistError(table_name)))
            # raise the specific error rather than a bare Exception
            raise RelationNotExistError(table_name)
        # load parent information about the table
        # We need to get the derivation information of the committed table;
        # Otherwise, in the multitable scenario, we do not know which
        # datatable/version_graph/index_table we need to update.
        try:
            abs_path = self.config['orpheus_home'] + file_name if file_name else self.config['orpheus_home']
            parent_vid_list = metadata.load_parent_id(table_name) if table_name else metadata.load_parent_id(abs_path, mapping='file_map')
            self.p.pmessage("Parent dataset is %s " % parent_vid_list[0])
            self.p.pmessage("Parent versions are %s " % ",".join(parent_vid_list[1]))
        except Exception as e:
            self.p.perror(str(e))
            raise
        parent_name = parent_vid_list[0]
        parent_list = parent_vid_list[1]

        datatable_name = parent_name + const.DATATABLE_SUFFIX
        indextable_name = parent_name + const.INDEXTABLE_SUFFIX
        graph_name = parent_name + const.VERSIONTABLE_SUFFIX
        try:
            # convert file into tmp_table first, then set the table_name to tmp_table
            if file_name:
                # need to know the schema for this file
                _attributes, _attributes_type = relation.get_datatable_attribute(datatable_name)

                relation.create_relation_force('tmp_table', datatable_name, sample_table_attributes=_attributes) # create a tmp table
                relation.convert_csv_to_table(abs_path, 'tmp_table', _attributes , delimiters=delimiters, header=header) # push everything from csv to tmp_table
                table_name = 'tmp_table'
        except Exception as e:
            self.p.perror(str(e))
            raise

        if table_name:
            try:
                _attributes, _attributes_type = relation.get_datatable_attribute(datatable_name)
                commit_attributes, commit_type = relation.get_datatable_attribute(table_name)
                if len(set(_attributes) - set(commit_attributes)) > 0:
                    raise BadStateError("%s and %s have different attributes" % (table_name, parent_name))
                view_name = "%s_view" % parent_name
                relation.create_parent_view(datatable_name, indextable_name, parent_list, view_name)
                existing_rids = [t[0] for t in relation.select_intersection_table(table_name, view_name, commit_attributes)]
                sql = relation.generate_complement_sql(table_name, view_name, attributes=_attributes)

                new_rids = relation.update_datatable(datatable_name, sql)
                relation.drop_view(view_name)

                self.p.pmessage("Found %s new records" % len(new_rids))
                self.p.pmessage("Found %s existing records" % len(existing_rids))

                current_version_rid = existing_rids + new_rids

                # it can happen that there are duplicate in here
                table_create_time = metadata.load_table_create_time(table_name) if table_name != 'tmp_table' else None

                # update version graph
                curt_vid = version.update_version_graph(graph_name, self.config['user'], len(current_version_rid), parent_list, table_create_time, message)

                # update index table
                version.update_index_table(indextable_name, curt_vid, current_version_rid)
                self.p.pmessage("Committing version %s with %s records" % (curt_vid, len(current_version_rid)))

                # Record the new parent version for the committed source.
                if is_file_commit:
                    metadata.update_parent_id(abs_path, parent_name, curt_vid, mapping='file_map')
                else:
                    metadata.update_parent_id(table_name, parent_name, curt_vid)
            except Exception as e:
                view_name = "%s_view" % parent_name
                relation.drop_view(view_name)
                self.p.perror(str(e))
                raise

        if relation.check_table_exists('tmp_table'):
            relation.drop_table('tmp_table')

        graph = VersionGraph(self.config, self.request)
        graph.update_vGraph_json(parent_name, curt_vid, parent_list)

        self.p.pmessage("Version %s has been committed!" % curt_vid)
        return parent_name, curt_vid, parent_list

    def exec_run(self, sql, conn):
        """Translate an Orpheus SQL statement and execute it; returns the
        (attribute_names, rows) pair from the connection."""
        try:
            sqlparser = SQLParser(conn)
            executable_sql = sqlparser.parse(sql)
            return conn.execute_sql(executable_sql)
        except Exception as e:
            import traceback
            traceback.print_exc()
            messages.error(self.request, str(e))
            raise

    def exec_explain(self, sql, conn):
        """Translate an Orpheus SQL statement and report the pretty-printed
        executable SQL through Django messages (returns None)."""
        try:
            sqlparser = SQLParser(conn)
            executable_sql = sqlparser.parse(sql)
            ret_sql = sqlparse.format(executable_sql, reindent=True, keyword_case='upper')
            # TODO: less hacky -- success = SQL, others = other message
            messages.success(self.request, ret_sql)
            # messages.info(self.request, ret_sql, extra_tags='sql')
            return None
        except Exception as e:
            import traceback
            traceback.print_exc()
            messages.error(self.request, str(e))
            raise

    # Show the underlying data structure
    def exec_show(self, dataset, conn):
        """Return sample rows (up to 4) from the version, data and index
        tables backing ``dataset``, as a list of (attr_names, rows) pairs."""
        table_list = []

        versiontable = dataset + const.VERSIONTABLE_SUFFIX
        self.__exec_show_helper(conn, versiontable, table_list)

        datatable = dataset + const.DATATABLE_SUFFIX
        self.__exec_show_helper(conn, datatable, table_list, "rid")

        indextable = dataset + const.INDEXTABLE_SUFFIX
        self.__exec_show_helper(conn, indextable, table_list)

        return table_list

    def __exec_show_helper(self, conn, table_name, table_list, pk = "vid"):
        """Fetch the first 4 rows of ``table_name`` ordered by ``pk`` and
        append the (attr_names, rows) pair to ``table_list``."""
        sql = "SELECT * FROM %s ORDER BY %s LIMIT 4 ;" % (table_name, pk)
        attr_names, transactions = conn.execute_sql(sql)
        table_list.append((attr_names, transactions))

    def exec_restore(self, conn):
        """Reset the demo database: drop all CVDs, private tables and files,
        then restore the protein_links CVD from its *_backup tables and the
        backup version-graph file."""
        # Local import keeps the fix self-contained; django is already a
        # dependency of this module. (BUG FIX: Q was used but never imported.)
        from django.db.models import Q
        cvd_name = "protein_links"
        try:

            # Drop all CVDs
            cvd_list = CVDs.objects.values('name')
            for cvd in cvd_list:
                messages.info(self.request, "Dropping the CVD [%s] ..." % cvd['name'])
                # BUG FIX: exec_drop requires the connection argument; it was
                # previously called without one, raising a TypeError.
                self.exec_drop(cvd['name'], conn)


            # Drop all private tables
            private_tables = PrivateTables.objects.values('name')
            for table in private_tables:
                messages.info(self.request, "Dropping the private table [%s] ..." % table['name'])
                sql = "DROP TABLE IF EXISTS \"%s\";" % table['name']
                conn.refresh_cursor()
                conn.execute_sql(sql)

            # delete all local files
            private_files = PrivateFiles.objects.values('name')
            for file_name in private_files:
                messages.info(self.request, "Dropping the private file [%s]" % file_name['name'])
                fpath = self.config['orpheus_home'] + file_name['name']
                try:
                    os.remove(fpath)
                except OSError:
                    pass  # best effort: the file may already be gone

            # delete all tuple in privatetables and privatefiles,
            # and all but "protein_link" in CVDs
            PrivateTables.objects.all().delete()
            PrivateFiles.objects.all().delete()
            CVDs.objects.filter(~ Q(name = cvd_name)).delete()
            messages.info(self.request, "Cleared all Django models")

            # restore records from protein_links_backup
            datatable = const.PUBLIC_SCHEMA + cvd_name + const.DATATABLE_SUFFIX
            self.__exec_restore_helper(conn, datatable, True, "rid")
            messages.info(self.request, "Restored %s " % datatable)

            indextable = const.PUBLIC_SCHEMA + cvd_name + const.INDEXTABLE_SUFFIX
            self.__exec_restore_helper(conn, indextable)
            messages.info(self.request, "Restored %s " % indextable)

            versiontable = const.PUBLIC_SCHEMA + cvd_name + const.VERSIONTABLE_SUFFIX
            self.__exec_restore_helper(conn, versiontable)
            messages.info(self.request, "Restored %s " % versiontable)

            # Copy vGraph from backup; `with` closes both files, so the
            # explicit close() calls of the original were redundant.
            vGraph_path = self.config['vGraph_json'] + "/" + cvd_name
            vGraph_path_backup = self.config['vGraph_json'] + "/" + cvd_name + "_backup"
            with open(vGraph_path_backup) as f:
                with open(vGraph_path, "w") as f1:
                    for line in f:
                        f1.write(line)
            messages.info(self.request, "Restored Version Graph ")

        except Exception:
            conn.refresh_cursor()
            # TODO the exception message is vague
            raise

    def __exec_restore_helper(self, conn, table_name, isSerial = False, pk = "vid"):
        """Recreate ``table_name`` from its ``*_backup`` copy and restore its
        primary key. With ``isSerial`` a fresh SERIAL pk column is added;
        otherwise the existing ``pk`` column is promoted to primary key."""
        sql = "SELECT * INTO %s FROM %s;" % (table_name, table_name + "_backup")
        conn.refresh_cursor()
        conn.execute_sql(sql)
        if isSerial:
            sql = "ALTER TABLE %s ADD COLUMN %s SERIAL PRIMARY KEY;" % (table_name, pk)
        else:
            # BUG FIX: the column already exists in the backup copy, so promote
            # it instead of re-adding it (the original ADD COLUMN statement was
            # missing a data type and a terminator, i.e. invalid SQL).
            sql = "ALTER TABLE %s ADD PRIMARY KEY (%s);" % (table_name, pk)
        # BUG FIX: the ALTER statement was built but never executed.
        conn.execute_sql(sql)
        conn.connect.commit()
--------------------------------------------------------------------------------
/orpheus/interface/main/static/assets/js/vendor/holder.min.js:
--------------------------------------------------------------------------------
1 | /*!
2 |
3 | Holder - client side image placeholders
4 | Version 2.6.0+51ebp
5 | © 2015 Ivan Malopinsky - http://imsky.co
6 |
7 | Site: http://holderjs.com
8 | Issues: https://github.com/imsky/holder/issues
9 | License: http://opensource.org/licenses/MIT
10 |
11 | */
12 | !function(a,b){"object"==typeof exports&&"object"==typeof module?module.exports=b():"function"==typeof define&&define.amd?define(b):"object"==typeof exports?exports.Holder=b():a.Holder=b()}(this,function(){return function(a){function b(d){if(c[d])return c[d].exports;var e=c[d]={exports:{},id:d,loaded:!1};return a[d].call(e.exports,e,e.exports,b),e.loaded=!0,e.exports}var c={};return b.m=a,b.c=c,b.p="",b(0)}([function(a,b,c){(function(b){function d(a,b,c,d){var g=e(c.substr(c.lastIndexOf(a.domain)),a);g&&f({mode:null,el:d,flags:g,engineSettings:b})}function e(a,b){for(var c={theme:y(K.settings.themes.gray,null),stylesheets:b.stylesheets,holderURL:[]},d=!1,e=String.fromCharCode(11),f=a.replace(/([^\\])\//g,"$1"+e).split(e),g=/%[0-9a-f]{2}/gi,h=f.length,i=0;h>i;i++){var j=f[i];if(j.match(g))try{j=decodeURIComponent(j)}catch(k){j=f[i]}var l=!1;if(K.flags.dimensions.match(j))d=!0,c.dimensions=K.flags.dimensions.output(j),l=!0;else if(K.flags.fluid.match(j))d=!0,c.dimensions=K.flags.fluid.output(j),c.fluid=!0,l=!0;else if(K.flags.textmode.match(j))c.textmode=K.flags.textmode.output(j),l=!0;else if(K.flags.colors.match(j)){var m=K.flags.colors.output(j);c.theme=y(c.theme,m),l=!0}else if(b.themes[j])b.themes.hasOwnProperty(j)&&(c.theme=y(b.themes[j],null)),l=!0;else if(K.flags.font.match(j))c.font=K.flags.font.output(j),l=!0;else if(K.flags.auto.match(j))c.auto=!0,l=!0;else if(K.flags.text.match(j))c.text=K.flags.text.output(j),l=!0;else if(K.flags.size.match(j))c.size=K.flags.size.output(j),l=!0;else if(K.flags.random.match(j)){null==K.vars.cache.themeKeys&&(K.vars.cache.themeKeys=Object.keys(b.themes));var n=K.vars.cache.themeKeys[0|Math.random()*K.vars.cache.themeKeys.length];c.theme=y(b.themes[n],null),l=!0}l&&c.holderURL.push(j)}return c.holderURL.unshift(b.domain),c.holderURL=c.holderURL.join("/"),d?c:!1}function f(a){var 
b=a.mode,c=a.el,d=a.flags,e=a.engineSettings,f=d.dimensions,h=d.theme,i=f.width+"x"+f.height;if(b=null==b?d.fluid?"fluid":"image":b,null!=d.text&&(h.text=d.text,"object"===c.nodeName.toLowerCase())){for(var l=h.text.split("\\n"),m=0;m1){var l=0,m=0,n=a.width*K.setup.lineWrapRatio,o=0;k=new e.Group("line"+o);for(var p=0;p=n||r===!0)&&(b(g,k,l,g.properties.leading),l=0,m+=g.properties.leading,o+=1,k=new e.Group("line"+o),k.y=m),r!==!0&&(j.moveTo(l,0),l+=h.spaceWidth+q.width,k.add(j))}b(g,k,l,g.properties.leading);for(var s in g.children)k=g.children[s],k.moveTo((g.width-k.width)/2,null,null);g.moveTo((a.width-g.width)/2,(a.height-g.height)/2,null),(a.height-g.height)/2<0&&g.moveTo(null,0,null)}else j=new e.Text(a.text),k=new e.Group("line0"),k.add(j),g.add(k),g.moveTo((a.width-h.boundingBox.width)/2,(a.height-h.boundingBox.height)/2,null);return d}function i(a,b,c){var d=parseInt(a,10),e=parseInt(b,10),f=Math.max(d,e),g=Math.min(d,e),h=.8*Math.min(g,f*K.defaults.scale);return Math.round(Math.max(c,h))}function j(a){var b;b=null==a||null==a.nodeType?K.vars.resizableImages:[a];for(var c=0,d=b.length;d>c;c++){var e=b[c];if(e.holderData){var f=e.holderData.flags,h=E(e);if(h){if(!e.holderData.resizeUpdate)continue;if(f.fluid&&f.auto){var i=e.holderData.fluidConfig;switch(i.mode){case"width":h.height=h.width/i.ratio;break;case"height":h.width=h.height*i.ratio}}var j={mode:"image",holderSettings:{dimensions:h,theme:f.theme,flags:f},el:e,engineSettings:e.holderData.engineSettings};"exact"==f.textmode&&(f.exactDimensions=h,j.holderSettings.dimensions=f.dimensions),g(j)}else n(e)}}}function k(a){if(a.holderData){var b=E(a);if(b){var 
c=a.holderData.flags,d={fluidHeight:"%"==c.dimensions.height.slice(-1),fluidWidth:"%"==c.dimensions.width.slice(-1),mode:null,initialDimensions:b};d.fluidWidth&&!d.fluidHeight?(d.mode="width",d.ratio=d.initialDimensions.width/parseFloat(c.dimensions.height)):!d.fluidWidth&&d.fluidHeight&&(d.mode="height",d.ratio=parseFloat(c.dimensions.width)/d.initialDimensions.height),a.holderData.fluidConfig=d}else n(a)}}function l(){for(var a,c=[],d=Object.keys(K.vars.invisibleImages),e=0,f=d.length;f>e;e++)a=K.vars.invisibleImages[d[e]],E(a)&&"img"==a.nodeName.toLowerCase()&&(c.push(a),delete K.vars.invisibleImages[d[e]]);c.length&&J.run({images:c}),b.requestAnimationFrame(l)}function m(){K.vars.visibilityCheckStarted||(b.requestAnimationFrame(l),K.vars.visibilityCheckStarted=!0)}function n(a){a.holderData.invisibleId||(K.vars.invisibleId+=1,K.vars.invisibleImages["i"+K.vars.invisibleId]=a,a.holderData.invisibleId=K.vars.invisibleId)}function o(a,b){return null==b?document.createElement(a):document.createElementNS(b,a)}function p(a,b){for(var c in b)a.setAttribute(c,b[c])}function q(a,b,c){var d,e;null==a?(a=o("svg",F),d=o("defs",F),e=o("style",F),p(e,{type:"text/css"}),d.appendChild(e),a.appendChild(d)):e=a.querySelector("style"),a.webkitMatchesSelector&&a.setAttribute("xmlns",F);for(var f=0;f=0;h--){var i=g.createProcessingInstruction("xml-stylesheet",'href="'+f[h]+'" rel="stylesheet"');g.insertBefore(i,g.firstChild)}var j=g.createProcessingInstruction("xml",'version="1.0" encoding="UTF-8" standalone="yes"');g.insertBefore(j,g.firstChild),g.removeChild(g.documentElement),e=d.serializeToString(g)}var k=d.serializeToString(a);return k=k.replace(/\&(\#[0-9]{2,}\;)/g,"&$1"),e+k}}function s(){return b.DOMParser?(new DOMParser).parseFromString("","application/xml"):void 0}function 
t(a){K.vars.debounceTimer||a.call(this),K.vars.debounceTimer&&b.clearTimeout(K.vars.debounceTimer),K.vars.debounceTimer=b.setTimeout(function(){K.vars.debounceTimer=null,a.call(this)},K.setup.debounce)}function u(){t(function(){j(null)})}var v=c(1),w=c(2),x=c(3),y=x.extend,z=x.cssProps,A=x.encodeHtmlEntity,B=x.decodeHtmlEntity,C=x.imageExists,D=x.getNodeArray,E=x.dimensionCheck,F="http://www.w3.org/2000/svg",G=8,H="2.6.0",I="\nCreated with Holder.js "+H+".\nLearn more at http://holderjs.com\n(c) 2012-2015 Ivan Malopinsky - http://imsky.co\n",J={version:H,addTheme:function(a,b){return null!=a&&null!=b&&(K.settings.themes[a]=b),delete K.vars.cache.themeKeys,this},addImage:function(a,b){var c=document.querySelectorAll(b);if(c.length)for(var d=0,e=c.length;e>d;d++){var f=o("img"),g={};g[K.vars.dataAttr]=a,p(f,g),c[d].appendChild(f)}return this},setResizeUpdate:function(a,b){a.holderData&&(a.holderData.resizeUpdate=!!b,a.holderData.resizeUpdate&&j(a))},run:function(a){a=a||{};var c={},g=y(K.settings,a);K.vars.preempted=!0,K.vars.dataAttr=g.dataAttr||K.vars.dataAttr,c.renderer=g.renderer?g.renderer:K.setup.renderer,-1===K.setup.renderers.join(",").indexOf(c.renderer)&&(c.renderer=K.setup.supportsSVG?"svg":K.setup.supportsCanvas?"canvas":"html");var h=D(g.images),i=D(g.bgnodes),j=D(g.stylenodes),k=D(g.objects);c.stylesheets=[],c.svgXMLStylesheet=!0,c.noFontFallback=g.noFontFallback?g.noFontFallback:!1;for(var l=0;l1){c.nodeValue="";for(var u=0;u=0?b:1)}function f(a){v?e(a):w.push(a)}null==document.readyState&&document.addEventListener&&(document.addEventListener("DOMContentLoaded",function y(){document.removeEventListener("DOMContentLoaded",y,!1),document.readyState="complete"},!1),document.readyState="loading");var g=a.document,h=g.documentElement,i="load",j=!1,k="on"+i,l="complete",m="readyState",n="attachEvent",o="detachEvent",p="addEventListener",q="DOMContentLoaded",r="onreadystatechange",s="removeEventListener",t=p in g,u=j,v=j,w=[];if(g[m]===l)e(b);else 
if(t)g[p](q,c,j),a[p](i,c,j);else{g[n](r,c),a[n](k,c);try{u=null==a.frameElement&&h}catch(x){}u&&u.doScroll&&!function z(){if(!v){try{u.doScroll("left")}catch(a){return e(z,50)}d(),b()}}()}return f.version="1.4.0",f.isReady=function(){return v},f}a.exports="undefined"!=typeof window&&b(window)},function(a,b,c){var d=c(4),e=function(a){function b(a,b){for(var c in b)a[c]=b[c];return a}var c=1,e=d.defclass({constructor:function(a){c++,this.parent=null,this.children={},this.id=c,this.name="n"+c,null!=a&&(this.name=a),this.x=0,this.y=0,this.z=0,this.width=0,this.height=0},resize:function(a,b){null!=a&&(this.width=a),null!=b&&(this.height=b)},moveTo:function(a,b,c){this.x=null!=a?a:this.x,this.y=null!=b?b:this.y,this.z=null!=c?c:this.z},add:function(a){var b=a.name;if(null!=this.children[b])throw"SceneGraph: child with that name already exists: "+b;this.children[b]=a,a.parent=this}}),f=d(e,function(b){this.constructor=function(){b.constructor.call(this,"root"),this.properties=a}}),g=d(e,function(a){function c(c,d){if(a.constructor.call(this,c),this.properties={fill:"#000"},null!=d)b(this.properties,d);else if(null!=c&&"string"!=typeof c)throw"SceneGraph: invalid node name"}this.Group=d.extend(this,{constructor:c,type:"group"}),this.Rect=d.extend(this,{constructor:c,type:"rect"}),this.Text=d.extend(this,{constructor:function(a){c.call(this),this.properties.text=a},type:"text"})}),h=new f;return this.Shape=g,this.root=h,this};a.exports=e},function(a,b){(function(a){b.extend=function(a,b){var c={};for(var d in a)a.hasOwnProperty(d)&&(c[d]=a[d]);if(null!=b)for(var e in b)b.hasOwnProperty(e)&&(c[e]=b[e]);return c},b.cssProps=function(a){var b=[];for(var c in a)a.hasOwnProperty(c)&&b.push(c+":"+a[c]);return b.join(";")},b.encodeHtmlEntity=function(a){for(var b=[],c=0,d=a.length-1;d>=0;d--)c=a.charCodeAt(d),b.unshift(c>128?["",c,";"].join(""):a[d]);return b.join("")},b.getNodeArray=function(b){var c=null;return"string"==typeof b?c=document.querySelectorAll(b):a.NodeList&&b 
instanceof a.NodeList?c=b:a.Node&&b instanceof a.Node?c=[b]:a.HTMLCollection&&b instanceof a.HTMLCollection?c=b:b instanceof Array?c=b:null===b&&(c=[]),c},b.imageExists=function(a,b){var c=new Image;c.onerror=function(){b.call(this,!1)},c.onload=function(){b.call(this,!0)},c.src=a},b.decodeHtmlEntity=function(a){return a.replace(/(\d+);/g,function(a,b){return String.fromCharCode(b)})},b.dimensionCheck=function(a){var b={height:a.clientHeight,width:a.clientWidth};return b.height&&b.width?b:!1}}).call(b,function(){return this}())},function(a){var b=function(){},c=Array.prototype.slice,d=function(a,d){var e=b.prototype="function"==typeof a?a.prototype:a,f=new b,g=d.apply(f,c.call(arguments,2).concat(e));if("object"==typeof g)for(var h in g)f[h]=g[h];if(!f.hasOwnProperty("constructor"))return f;var i=f.constructor;return i.prototype=f,i};d.defclass=function(a){var b=a.constructor;return b.prototype=a,b},d.extend=function(a,b){return d(a,function(a){return this.uber=a,b})},a.exports=d}])});
--------------------------------------------------------------------------------
/orpheus/interface/main/static/assets/css/docs.min.css:
--------------------------------------------------------------------------------
1 | /*!
2 | * IE10 viewport hack for Surface/desktop Windows 8 bug
3 | * Copyright 2014-2015 Twitter, Inc.
4 | * Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
5 | */@-ms-viewport{width:device-width}@-o-viewport{width:device-width}@viewport{width:device-width}.hll{background-color:#ffc}.c{color:#999}.err{color:#A00;background-color:#FAA}.k{color:#069}.o{color:#555}.cm{color:#999}.cp{color:#099}.c1{color:#999}.cs{color:#999}.gd{background-color:#FCC;border:1px solid #C00}.ge{font-style:italic}.gr{color:red}.gh{color:#030}.gi{background-color:#CFC;border:1px solid #0C0}.go{color:#AAA}.gp{color:#009}.gu{color:#030}.gt{color:#9C6}.kc{color:#069}.kd{color:#069}.kn{color:#069}.kp{color:#069}.kr{color:#069}.kt{color:#078}.m{color:#F60}.s{color:#d44950}.na{color:#4f9fcf}.nb{color:#366}.nc{color:#0A8}.no{color:#360}.nd{color:#99F}.ni{color:#999}.ne{color:#C00}.nf{color:#C0F}.nl{color:#99F}.nn{color:#0CF}.nt{color:#2f6f9f}.nv{color:#033}.ow{color:#000}.w{color:#bbb}.mf{color:#F60}.mh{color:#F60}.mi{color:#F60}.mo{color:#F60}.sb{color:#C30}.sc{color:#C30}.sd{color:#C30;font-style:italic}.s2{color:#C30}.se{color:#C30}.sh{color:#C30}.si{color:#A00}.sx{color:#C30}.sr{color:#3AA}.s1{color:#C30}.ss{color:#FC3}.bp{color:#366}.vc{color:#033}.vg{color:#033}.vi{color:#033}.il{color:#F60}.css .nt+.nt,.css .o,.css .o+.nt{color:#999}/*!
6 | * Bootstrap Docs (http://getbootstrap.com)
7 | * Copyright 2011-2016 Twitter, Inc.
8 | * Licensed under the Creative Commons Attribution 3.0 Unported License. For
9 | * details, see https://creativecommons.org/licenses/by/3.0/.
10 | */body{position:relative}.table code{font-size:13px;font-weight:400}h2 code,h3 code,h4 code{background-color:inherit}.btn-outline{color:#563d7c;background-color:transparent;border-color:#563d7c}.btn-outline:active,.btn-outline:focus,.btn-outline:hover{color:#fff;background-color:#563d7c;border-color:#563d7c}.btn-outline-inverse{color:#fff;background-color:transparent;border-color:#cdbfe3}.btn-outline-inverse:active,.btn-outline-inverse:focus,.btn-outline-inverse:hover{color:#563d7c;text-shadow:none;background-color:#fff;border-color:#fff}.bs-docs-booticon{display:block;font-weight:500;color:#fff;text-align:center;cursor:default;background-color:#563d7c;border-radius:15%}.bs-docs-booticon-sm{width:30px;height:30px;font-size:20px;line-height:28px}.bs-docs-booticon-lg{width:144px;height:144px;font-size:108px;line-height:140px}.bs-docs-booticon-inverse{color:#563d7c;background-color:#fff}.bs-docs-booticon-outline{background-color:transparent;border:1px solid #cdbfe3}#skippy{display:block;padding:1em;color:#fff;background-color:#6f5499;outline:0}#skippy .skiplink-text{padding:.5em;outline:1px dotted}#content:focus{outline:0}.bs-docs-nav{margin-bottom:0;background-color:#fff;border-bottom:0}.bs-home-nav .bs-nav-b{display:none}.bs-docs-nav .navbar-brand,.bs-docs-nav .navbar-nav>li>a{font-weight:500;color:#563d7c}.bs-docs-nav .navbar-nav>.active>a,.bs-docs-nav .navbar-nav>.active>a:hover,.bs-docs-nav .navbar-nav>li>a:hover{color:#463265;background-color:#f9f9f9}.bs-docs-nav .navbar-toggle .icon-bar{background-color:#563d7c}.bs-docs-nav .navbar-header .navbar-toggle{border-color:#fff}.bs-docs-nav .navbar-header .navbar-toggle:focus,.bs-docs-nav .navbar-header .navbar-toggle:hover{background-color:#f9f9f9;border-color:#f9f9f9}.bs-docs-footer{padding-top:50px;padding-bottom:50px;margin-top:100px;color:#99979c;text-align:center;background-color:#2a2730}.bs-docs-footer a{color:#fff}.bs-docs-footer-links{padding-left:0;margin-bottom:20px}.bs-docs-footer-links 
li{display:inline-block}.bs-docs-footer-links li+li{margin-left:15px}@media (min-width:768px){.bs-docs-footer{text-align:left}.bs-docs-footer p{margin-bottom:0}}.bs-docs-header,.bs-docs-masthead{position:relative;padding:30px 0;color:#cdbfe3;text-align:center;text-shadow:0 1px 0 rgba(0,0,0,.1);background-color:#6f5499;background-image:-webkit-gradient(linear,left top,left bottom,from(#563d7c),to(#6f5499));background-image:-webkit-linear-gradient(top,#563d7c 0,#6f5499 100%);background-image:-o-linear-gradient(top,#563d7c 0,#6f5499 100%);background-image:linear-gradient(to bottom,#563d7c 0,#6f5499 100%);filter:progid:DXImageTransform.Microsoft.gradient(startColorstr='#563d7c', endColorstr='#6F5499', GradientType=0);background-repeat:repeat-x}.bs-docs-masthead .bs-docs-booticon{margin:0 auto 30px}.bs-docs-masthead h1{font-weight:300;line-height:1;color:#fff}.bs-docs-masthead .lead{margin:0 auto 30px;font-size:20px;color:#fff}.bs-docs-masthead .version{margin-top:-15px;margin-bottom:30px;color:#9783b9}.bs-docs-masthead .btn{width:100%;padding:15px 30px;font-size:20px}@media (min-width:480px){.bs-docs-masthead .btn{width:auto}}@media (min-width:768px){.bs-docs-masthead{padding:80px 0}.bs-docs-masthead h1{font-size:60px}.bs-docs-masthead .lead{font-size:24px}}@media (min-width:992px){.bs-docs-masthead .lead{width:80%;font-size:30px}}.bs-docs-header{margin-bottom:40px;font-size:20px}.bs-docs-header h1{margin-top:0;color:#fff}.bs-docs-header p{margin-bottom:0;font-weight:300;line-height:1.4}.bs-docs-header .container{position:relative}@media (min-width:768px){.bs-docs-header{padding-top:60px;padding-bottom:60px;font-size:24px;text-align:left}.bs-docs-header h1{font-size:60px;line-height:1}}@media (min-width:992px){.bs-docs-header h1,.bs-docs-header p{margin-right:380px}}.carbonad{width:auto!important;height:auto!important;padding:20px!important;margin:30px -15px -31px!important;overflow:hidden;font-size:13px!important;line-height:16px!important;text-align:left;background:0 
0!important;border:solid #866ab3!important;border-width:1px 0!important}.carbonad-img{margin:0!important}.carbonad-tag,.carbonad-text{display:block!important;float:none!important;width:auto!important;height:auto!important;margin-left:145px!important;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif!important}.carbonad-text{padding-top:0!important}.carbonad-tag{color:inherit!important;text-align:left!important}.carbonad-tag a,.carbonad-text a{color:#fff!important}.carbonad #azcarbon>img{display:none}@media (min-width:480px){.carbonad{width:330px!important;margin:20px auto!important;border-width:1px!important;border-radius:4px}.bs-docs-masthead .carbonad{margin:50px auto 0!important}}@media (min-width:768px){.carbonad{margin-right:0!important;margin-left:0!important}}@media (min-width:992px){.carbonad{position:absolute;top:0;right:15px;width:330px!important;padding:15px!important;margin:0!important}.bs-docs-masthead .carbonad{position:static}}.bs-docs-featurette{padding-top:40px;padding-bottom:40px;font-size:16px;line-height:1.5;color:#555;text-align:center;background-color:#fff;border-bottom:1px solid #e5e5e5}.bs-docs-featurette+.bs-docs-footer{margin-top:0;border-top:0}.bs-docs-featurette-title{margin-bottom:5px;font-size:30px;font-weight:400;color:#333}.half-rule{width:100px;margin:40px auto}.bs-docs-featurette h3{margin-bottom:5px;font-weight:400;color:#333}.bs-docs-featurette-img{display:block;margin-bottom:20px;color:#333}.bs-docs-featurette-img:hover{color:#337ab7;text-decoration:none}.bs-docs-featurette-img img{display:block;margin-bottom:15px}@media (min-width:480px){.bs-docs-featurette .img-responsive{margin-top:30px}}@media (min-width:768px){.bs-docs-featurette{padding-top:100px;padding-bottom:100px}.bs-docs-featurette-title{font-size:40px}.bs-docs-featurette .lead{max-width:80%;margin-right:auto;margin-left:auto}.bs-docs-featurette .img-responsive{margin-top:0}}.bs-docs-featured-sites{margin-right:-1px;margin-left:-1px}.bs-docs-featured-sites 
.col-xs-6{padding:1px}.bs-docs-featured-sites .img-responsive{margin-top:0}@media (min-width:768px){.bs-docs-featured-sites .col-sm-3:first-child img{border-top-left-radius:4px;border-bottom-left-radius:4px}.bs-docs-featured-sites .col-sm-3:last-child img{border-top-right-radius:4px;border-bottom-right-radius:4px}}.bs-examples .thumbnail{margin-bottom:10px}.bs-examples h4{margin-bottom:5px}.bs-examples p{margin-bottom:20px}@media (max-width:480px){.bs-examples{margin-right:-10px;margin-left:-10px}.bs-examples>[class^=col-]{padding-right:10px;padding-left:10px}}.bs-docs-sidebar.affix{position:static}@media (min-width:768px){.bs-docs-sidebar{padding-left:20px}}.bs-docs-sidenav{margin-top:20px;margin-bottom:20px}.bs-docs-sidebar .nav>li>a{display:block;padding:4px 20px;font-size:13px;font-weight:500;color:#767676}.bs-docs-sidebar .nav>li>a:focus,.bs-docs-sidebar .nav>li>a:hover{padding-left:19px;color:#563d7c;text-decoration:none;background-color:transparent;border-left:1px solid #563d7c}.bs-docs-sidebar .nav>.active:focus>a,.bs-docs-sidebar .nav>.active:hover>a,.bs-docs-sidebar .nav>.active>a{padding-left:18px;font-weight:700;color:#563d7c;background-color:transparent;border-left:2px solid #563d7c}.bs-docs-sidebar .nav .nav{display:none;padding-bottom:10px}.bs-docs-sidebar .nav .nav>li>a{padding-top:1px;padding-bottom:1px;padding-left:30px;font-size:12px;font-weight:400}.bs-docs-sidebar .nav .nav>li>a:focus,.bs-docs-sidebar .nav .nav>li>a:hover{padding-left:29px}.bs-docs-sidebar .nav .nav>.active:focus>a,.bs-docs-sidebar .nav .nav>.active:hover>a,.bs-docs-sidebar .nav .nav>.active>a{padding-left:28px;font-weight:500}.back-to-top,.bs-docs-theme-toggle{display:none;padding:4px 10px;margin-top:10px;margin-left:10px;font-size:12px;font-weight:500;color:#999}.back-to-top:hover,.bs-docs-theme-toggle:hover{color:#563d7c;text-decoration:none}.bs-docs-theme-toggle{margin-top:0}@media (min-width:768px){.back-to-top,.bs-docs-theme-toggle{display:block}}@media 
(min-width:992px){.bs-docs-sidebar .nav>.active>ul{display:block}.bs-docs-sidebar.affix,.bs-docs-sidebar.affix-bottom{width:213px}.bs-docs-sidebar.affix{position:fixed;top:20px}.bs-docs-sidebar.affix-bottom{position:absolute}.bs-docs-sidebar.affix .bs-docs-sidenav,.bs-docs-sidebar.affix-bottom .bs-docs-sidenav{margin-top:0;margin-bottom:0}}@media (min-width:1200px){.bs-docs-sidebar.affix,.bs-docs-sidebar.affix-bottom{width:263px}}.bs-docs-section{margin-bottom:60px}.bs-docs-section:last-child{margin-bottom:0}h1[id]{padding-top:20px;margin-top:0}.bs-callout{padding:20px;margin:20px 0;border:1px solid #eee;border-left-width:5px;border-radius:3px}.bs-callout h4{margin-top:0;margin-bottom:5px}.bs-callout p:last-child{margin-bottom:0}.bs-callout code{border-radius:3px}.bs-callout+.bs-callout{margin-top:-5px}.bs-callout-danger{border-left-color:#ce4844}.bs-callout-danger h4{color:#ce4844}.bs-callout-warning{border-left-color:#aa6708}.bs-callout-warning h4{color:#aa6708}.bs-callout-info{border-left-color:#1b809e}.bs-callout-info h4{color:#1b809e}.color-swatches{margin:0 -5px;overflow:hidden}.color-swatch{float:left;width:60px;height:60px;margin:0 5px;border-radius:3px}@media (min-width:768px){.color-swatch{width:100px;height:100px}}.color-swatches .gray-darker{background-color:#222}.color-swatches .gray-dark{background-color:#333}.color-swatches .gray{background-color:#555}.color-swatches .gray-light{background-color:#999}.color-swatches .gray-lighter{background-color:#eee}.color-swatches .brand-primary{background-color:#337ab7}.color-swatches .brand-success{background-color:#5cb85c}.color-swatches .brand-warning{background-color:#f0ad4e}.color-swatches .brand-danger{background-color:#d9534f}.color-swatches .brand-info{background-color:#5bc0de}.color-swatches .bs-purple{background-color:#563d7c}.color-swatches .bs-purple-light{background-color:#c7bfd3}.color-swatches .bs-purple-lighter{background-color:#e5e1ea}.color-swatches .bs-gray{background-color:#f9f9f9}.bs-team 
.team-member{line-height:32px;color:#555}.bs-team .team-member:hover{color:#333;text-decoration:none}.bs-team .github-btn{float:right;width:180px;height:20px;margin-top:6px;border:none}.bs-team img{float:left;width:32px;margin-right:10px;border-radius:4px}.bs-docs-browser-bugs td p{margin-bottom:0}.bs-docs-browser-bugs th:first-child{width:18%}.show-grid{margin-bottom:15px}.show-grid [class^=col-]{padding-top:10px;padding-bottom:10px;background-color:#eee;background-color:rgba(86,61,124,.15);border:1px solid #ddd;border:1px solid rgba(86,61,124,.2)}.bs-example{position:relative;padding:45px 15px 15px;margin:0 -15px 15px;border-color:#e5e5e5 #eee #eee;border-style:solid;border-width:1px 0;-webkit-box-shadow:inset 0 3px 6px rgba(0,0,0,.05);box-shadow:inset 0 3px 6px rgba(0,0,0,.05)}.bs-example:after{position:absolute;top:15px;left:15px;font-size:12px;font-weight:700;color:#959595;text-transform:uppercase;letter-spacing:1px;content:"Example"}.bs-example-padded-bottom{padding-bottom:24px}.bs-example+.highlight,.bs-example+.zero-clipboard+.highlight{margin:-15px -15px 15px;border-width:0 0 1px;border-radius:0}@media (min-width:768px){.bs-example{margin-right:0;margin-left:0;background-color:#fff;border-color:#ddd;border-width:1px;border-radius:4px 4px 0 0;-webkit-box-shadow:none;box-shadow:none}.bs-example+.highlight,.bs-example+.zero-clipboard+.highlight{margin-top:-16px;margin-right:0;margin-left:0;border-width:1px;border-bottom-right-radius:4px;border-bottom-left-radius:4px}.bs-example-standalone{border-radius:4px}}.bs-example 
.container{width:auto}.bs-example>.alert:last-child,.bs-example>.form-control:last-child,.bs-example>.jumbotron:last-child,.bs-example>.list-group:last-child,.bs-example>.navbar:last-child,.bs-example>.panel:last-child,.bs-example>.progress:last-child,.bs-example>.table-responsive:last-child>.table,.bs-example>.table:last-child,.bs-example>.well:last-child,.bs-example>blockquote:last-child,.bs-example>ol:last-child,.bs-example>p:last-child,.bs-example>ul:last-child{margin-bottom:0}.bs-example>p>.close{float:none}.bs-example-type .table .type-info{color:#767676;vertical-align:middle}.bs-example-type .table td{padding:15px 0;border-color:#eee}.bs-example-type .table tr:first-child td{border-top:0}.bs-example-type h1,.bs-example-type h2,.bs-example-type h3,.bs-example-type h4,.bs-example-type h5,.bs-example-type h6{margin:0}.bs-example-bg-classes p{padding:15px}.bs-example>.img-circle,.bs-example>.img-rounded,.bs-example>.img-thumbnail{margin:5px}.bs-example>.table-responsive>.table{background-color:#fff}.bs-example>.btn,.bs-example>.btn-group{margin-top:5px;margin-bottom:5px}.bs-example>.btn-toolbar+.btn-toolbar{margin-top:10px}.bs-example-control-sizing input[type=text]+input[type=text],.bs-example-control-sizing select{margin-top:10px}.bs-example-form .input-group{margin-bottom:10px}.bs-example>textarea.form-control{resize:vertical}.bs-example>.list-group{max-width:400px}.bs-example .navbar:last-child{margin-bottom:0}.bs-navbar-bottom-example,.bs-navbar-top-example{z-index:1;padding:0;overflow:hidden}.bs-navbar-bottom-example .navbar-header,.bs-navbar-top-example .navbar-header{margin-left:0}.bs-navbar-bottom-example .navbar-fixed-bottom,.bs-navbar-top-example .navbar-fixed-top{position:relative;margin-right:0;margin-left:0}.bs-navbar-top-example{padding-bottom:45px}.bs-navbar-top-example:after{top:auto;bottom:15px}.bs-navbar-top-example .navbar-fixed-top{top:-1px}.bs-navbar-bottom-example{padding-top:45px}.bs-navbar-bottom-example 
.navbar-fixed-bottom{bottom:-1px}.bs-navbar-bottom-example .navbar{margin-bottom:0}@media (min-width:768px){.bs-navbar-bottom-example .navbar-fixed-bottom,.bs-navbar-top-example .navbar-fixed-top{position:absolute}}.bs-example .pagination{margin-top:10px;margin-bottom:10px}.bs-example>.pager{margin-top:0}.bs-example-modal{background-color:#f5f5f5}.bs-example-modal .modal{position:relative;top:auto;right:auto;bottom:auto;left:auto;z-index:1;display:block}.bs-example-modal .modal-dialog{left:auto;margin-right:auto;margin-left:auto}.bs-example>.dropdown>.dropdown-toggle{float:left}.bs-example>.dropdown>.dropdown-menu{position:static;display:block;margin-bottom:5px;clear:left}.bs-example-tabs .nav-tabs{margin-bottom:15px}.bs-example-tooltips{text-align:center}.bs-example-tooltips>.btn{margin-top:5px;margin-bottom:5px}.bs-example-tooltip .tooltip{position:relative;display:inline-block;margin:10px 20px;opacity:1}.bs-example-popover{padding-bottom:24px;background-color:#f9f9f9}.bs-example-popover .popover{position:relative;display:block;float:left;width:260px;margin:20px}.scrollspy-example{position:relative;height:200px;margin-top:10px;overflow:auto}.bs-example>.nav-pills-stacked-example{max-width:300px}#collapseExample .well{margin-bottom:0}.bs-events-table>tbody>tr>td:first-child,.bs-events-table>thead>tr>th:first-child{white-space:nowrap}.bs-events-table>thead>tr>th:first-child{width:150px}.js-options-table>thead>tr>th:nth-child(1),.js-options-table>thead>tr>th:nth-child(2){width:100px}.js-options-table>thead>tr>th:nth-child(3){width:50px}.highlight{padding:9px 14px;margin-bottom:14px;background-color:#f7f7f9;border:1px solid #e1e1e8;border-radius:4px}.highlight pre{padding:0;margin-top:0;margin-bottom:0;word-break:normal;white-space:nowrap;background-color:transparent;border:0}.highlight pre code{font-size:inherit;color:#333}.highlight pre code:first-child{display:inline-block;padding-right:45px}.table-responsive .highlight pre{white-space:normal}.bs-table th 
small,.responsive-utilities th small{display:block;font-weight:400;color:#999}.responsive-utilities tbody th{font-weight:400}.responsive-utilities td{text-align:center}.responsive-utilities td.is-visible{color:#468847;background-color:#dff0d8!important}.responsive-utilities td.is-hidden{color:#ccc;background-color:#f9f9f9!important}.responsive-utilities-test{margin-top:5px}.responsive-utilities-test .col-xs-6{margin-bottom:10px}.responsive-utilities-test span{display:block;padding:15px 10px;font-size:14px;font-weight:700;line-height:1.1;text-align:center;border-radius:4px}.hidden-on .col-xs-6 .hidden-lg,.hidden-on .col-xs-6 .hidden-md,.hidden-on .col-xs-6 .hidden-sm,.hidden-on .col-xs-6 .hidden-xs,.visible-on .col-xs-6 .hidden-lg,.visible-on .col-xs-6 .hidden-md,.visible-on .col-xs-6 .hidden-sm,.visible-on .col-xs-6 .hidden-xs{color:#999;border:1px solid #ddd}.hidden-on .col-xs-6 .visible-lg-block,.hidden-on .col-xs-6 .visible-md-block,.hidden-on .col-xs-6 .visible-sm-block,.hidden-on .col-xs-6 .visible-xs-block,.visible-on .col-xs-6 .visible-lg-block,.visible-on .col-xs-6 .visible-md-block,.visible-on .col-xs-6 .visible-sm-block,.visible-on .col-xs-6 .visible-xs-block{color:#468847;background-color:#dff0d8;border:1px solid #d6e9c6}.bs-glyphicons{margin:0 -10px 20px;overflow:hidden}.bs-glyphicons-list{padding-left:0;list-style:none}.bs-glyphicons li{float:left;width:25%;height:115px;padding:10px;font-size:10px;line-height:1.4;text-align:center;background-color:#f9f9f9;border:1px solid #fff}.bs-glyphicons .glyphicon{margin-top:5px;margin-bottom:10px;font-size:24px}.bs-glyphicons .glyphicon-class{display:block;text-align:center;word-wrap:break-word}.bs-glyphicons li:hover{color:#fff;background-color:#563d7c}@media (min-width:768px){.bs-glyphicons{margin-right:0;margin-left:0}.bs-glyphicons li{width:12.5%;font-size:12px}}.bs-customizer .toggle{float:right;margin-top:25px}.bs-customizer label{margin-top:10px;font-weight:500;color:#555}.bs-customizer 
h2{padding-top:30px;margin-top:0;margin-bottom:5px}.bs-customizer h3{margin-bottom:0}.bs-customizer h4{margin-top:15px;margin-bottom:0}.bs-customizer .bs-callout h4{margin-top:0;margin-bottom:5px}.bs-customizer input[type=text]{font-family:Menlo,Monaco,Consolas,"Courier New",monospace;background-color:#fafafa}.bs-customizer .help-block{margin-bottom:5px;font-size:12px}#less-section label{font-weight:400}.bs-customize-download .btn-outline{padding:20px}.bs-customizer-alert{position:fixed;top:0;right:0;left:0;z-index:1030;padding:15px 0;color:#fff;background-color:#d9534f;border-bottom:1px solid #b94441;-webkit-box-shadow:inset 0 1px 0 rgba(255,255,255,.25);box-shadow:inset 0 1px 0 rgba(255,255,255,.25)}.bs-customizer-alert .close{margin-top:-4px;font-size:24px}.bs-customizer-alert p{margin-bottom:0}.bs-customizer-alert .glyphicon{margin-right:5px}.bs-customizer-alert pre{margin:10px 0 0;color:#fff;background-color:#a83c3a;border-color:#973634;-webkit-box-shadow:inset 0 2px 4px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1);box-shadow:inset 0 2px 4px rgba(0,0,0,.05),0 1px 0 rgba(255,255,255,.1)}.bs-dropzone{position:relative;padding:20px;margin-bottom:20px;color:#777;text-align:center;border:2px dashed #eee;border-radius:4px}.bs-dropzone .import-header{margin-bottom:5px}.bs-dropzone .glyphicon-download-alt{font-size:40px}.bs-dropzone hr{width:100px}.bs-dropzone .lead{margin-bottom:10px;font-weight:400;color:#333}#import-manual-trigger{cursor:pointer}.bs-dropzone p:last-child{margin-bottom:0}.bs-brand-logos{display:table;width:100%;margin-bottom:15px;overflow:hidden;color:#563d7c;background-color:#f9f9f9;border-radius:4px}.bs-brand-item{padding:60px 0;text-align:center}.bs-brand-item+.bs-brand-item{border-top:1px solid #fff}.bs-brand-logos .inverse{color:#fff;background-color:#563d7c}.bs-brand-item h1,.bs-brand-item h3{margin-top:0;margin-bottom:0}.bs-brand-item .bs-docs-booticon{margin-right:auto;margin-left:auto}.bs-brand-item 
.glyphicon{width:30px;height:30px;margin:10px auto -10px;line-height:30px;color:#fff;border-radius:50%}.bs-brand-item .glyphicon-ok{background-color:#5cb85c}.bs-brand-item .glyphicon-remove{background-color:#d9534f}@media (min-width:768px){.bs-brand-item{display:table-cell;width:1%}.bs-brand-item+.bs-brand-item{border-top:0;border-left:1px solid #fff}.bs-brand-item h1{font-size:60px}}.zero-clipboard{position:relative;display:none}.btn-clipboard{position:absolute;top:0;right:0;z-index:10;display:block;padding:5px 8px;font-size:12px;color:#767676;cursor:pointer;background-color:#fff;border:1px solid #e1e1e8;border-radius:0 4px 0 4px}.btn-clipboard-hover{color:#fff;background-color:#563d7c;border-color:#563d7c}@media (min-width:768px){.zero-clipboard{display:block}.bs-example+.zero-clipboard .btn-clipboard{top:-16px;border-top-right-radius:0}}.anchorjs-link{color:inherit}@media (max-width:480px){.anchorjs-link{display:none}}:hover>.anchorjs-link{opacity:.75;-webkit-transition:color .16s linear;-o-transition:color .16s linear;transition:color .16s linear}.anchorjs-link:focus,:hover>.anchorjs-link:hover{text-decoration:none;opacity:1}#focusedInput{border-color:#ccc;border-color:rgba(82,168,236,.8);outline:0;outline:thin dotted\9;-webkit-box-shadow:0 0 8px rgba(82,168,236,.6);box-shadow:0 0 8px rgba(82,168,236,.6)}.v4-tease{display:block;padding:15px 20px;font-weight:700;color:#fff;text-align:center;background-color:#0275d8}.v4-tease:hover{color:#fff;text-decoration:none;background-color:#0269c2}@media print{a[href]:after{content:""!important}}
11 | /*# sourceMappingURL=docs.min.css.map */
--------------------------------------------------------------------------------