├── .gitignore
├── .gitlab-ci.yml
├── LICENSE
├── NEWS.rst
├── README.rst
├── RELEASING.rst
├── ci
├── build-env
│ └── Dockerfile
└── phoenix
│ ├── Dockerfile
│ ├── docker-entrypoint.sh
│ └── hbase-site.xml
├── doc
├── Makefile
├── api.rst
├── conf.py
├── index.rst
└── versions.rst
├── docker-compose.yml
├── examples
├── basic.py
└── shell.py
├── gen-protobuf.sh
├── phoenixdb
├── __init__.py
├── avatica
│ ├── __init__.py
│ ├── client.py
│ └── proto
│ │ ├── __init__.py
│ │ ├── common_pb2.py
│ │ ├── requests_pb2.py
│ │ └── responses_pb2.py
├── connection.py
├── cursor.py
├── errors.py
├── tests
│ ├── __init__.py
│ ├── dbapi20.py
│ ├── test_avatica.py
│ ├── test_connection.py
│ ├── test_db.py
│ ├── test_dbapi20.py
│ ├── test_errors.py
│ └── test_types.py
└── types.py
├── requirements.txt
├── setup.cfg
├── setup.py
└── tox.ini
/.gitignore:
--------------------------------------------------------------------------------
1 | /dist/
2 | /build/
3 | /doc/_build/
4 | /doc/build/
5 | *.pyc
6 | *.egg-info/
7 | .vagrant/
8 |
9 |
--------------------------------------------------------------------------------
/.gitlab-ci.yml:
--------------------------------------------------------------------------------
1 | stages:
2 | - prepare
3 | - test
4 |
5 | build build-env image:
6 | stage: prepare
7 | script:
8 | - cd ci/build-env
9 | - docker build -t ${CI_REGISTRY_IMAGE}/build-env .
10 | - docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN $CI_REGISTRY
11 | - docker push $CI_REGISTRY_IMAGE/build-env
12 | tags:
13 | - docker-host
14 | only:
15 | - master@lukas/python-phoenixdb
16 |
17 | .build-phoenix-image: &build_phoenix_image
18 | stage: prepare
19 | script:
20 | - JOB_NAME=($CI_JOB_NAME)
21 | - cd ci/phoenix
22 | - docker build -t ${CI_REGISTRY_IMAGE}/phoenix:${JOB_NAME[2]}
23 | --build-arg PHOENIX_VERSION=$PHOENIX_VERSION
24 | --build-arg HBASE_VERSION=$HBASE_VERSION
25 | --build-arg HBASE_DIR=$HBASE_DIR
26 | .
27 | - docker login -u gitlab-ci-token -p $CI_BUILD_TOKEN $CI_REGISTRY
28 | - docker push $CI_REGISTRY_IMAGE/phoenix:${JOB_NAME[2]}
29 | tags:
30 | - docker-host
31 |
32 | build phoenix 5.0.0-alpha-HBase-2.0 image:
33 | <<: *build_phoenix_image
34 | variables:
35 | PHOENIX_VERSION: 5.0.0-alpha-HBase-2.0
36 | HBASE_VERSION: 2.0.0-beta-1
37 | HBASE_DIR: hbase-2.0.0-beta-1
38 |
39 | build phoenix 4.13 image:
40 | <<: *build_phoenix_image
41 | variables:
42 | PHOENIX_VERSION: 4.13.1-HBase-1.3
43 | HBASE_VERSION: 1.3.1
44 | HBASE_DIR: 1.3.1
45 |
46 | build phoenix 4.12 image:
47 | <<: *build_phoenix_image
48 | variables:
49 | PHOENIX_VERSION: 4.12.0-HBase-1.3
50 | HBASE_VERSION: 1.3.1
51 | HBASE_DIR: 1.3.1
52 |
53 | build phoenix 4.11 image:
54 | <<: *build_phoenix_image
55 | variables:
56 | PHOENIX_VERSION: 4.11.0-HBase-1.3
57 | HBASE_VERSION: 1.3.1
58 | HBASE_DIR: 1.3.1
59 |
60 | build phoenix 4.10 image:
61 | <<: *build_phoenix_image
62 | variables:
63 | PHOENIX_VERSION: 4.10.0-HBase-1.2
64 | HBASE_VERSION: 1.2.6
65 | HBASE_DIR: 1.2.6
66 |
67 | build phoenix 4.9 image:
68 | <<: *build_phoenix_image
69 | variables:
70 | PHOENIX_VERSION: 4.9.0-HBase-1.2
71 | HBASE_VERSION: 1.2.6
72 | HBASE_DIR: 1.2.6
73 |
74 | build phoenix 4.8 image:
75 | <<: *build_phoenix_image
76 | variables:
77 | PHOENIX_VERSION: 4.8.2-HBase-1.2
78 | HBASE_VERSION: 1.2.6
79 | HBASE_DIR: 1.2.6
80 |
81 | .test: &test
82 | image: $CI_REGISTRY_IMAGE/build-env
83 | variables:
84 | PHOENIXDB_TEST_DB_URL: http://phoenix:8765/
85 | PIP_CACHE_DIR: $CI_PROJECT_DIR/cache/
86 | script:
87 | - tox -e py27,py35
88 | cache:
89 | paths:
90 | - cache/
91 | tags:
92 | - docker
93 |
94 | test phoenix 5.0.0-alpha-HBase-2.0:
95 | <<: *test
96 | services:
97 | - name: $CI_REGISTRY_IMAGE/phoenix:5.0.0-alpha-HBase-2.0
98 | alias: phoenix
99 |
100 | test phoenix 4.13:
101 | <<: *test
102 | services:
103 | - name: $CI_REGISTRY_IMAGE/phoenix:4.13
104 | alias: phoenix
105 |
106 | test phoenix 4.12:
107 | <<: *test
108 | services:
109 | - name: $CI_REGISTRY_IMAGE/phoenix:4.12
110 | alias: phoenix
111 |
112 | test phoenix 4.11:
113 | <<: *test
114 | services:
115 | - name: $CI_REGISTRY_IMAGE/phoenix:4.11
116 | alias: phoenix
117 |
118 | test phoenix 4.10:
119 | <<: *test
120 | services:
121 | - name: $CI_REGISTRY_IMAGE/phoenix:4.10
122 | alias: phoenix
123 |
124 | test phoenix 4.9:
125 | <<: *test
126 | services:
127 | - name: $CI_REGISTRY_IMAGE/phoenix:4.9
128 | alias: phoenix
129 |
130 | test phoenix 4.8:
131 | <<: *test
132 | services:
133 | - name: $CI_REGISTRY_IMAGE/phoenix:4.8
134 | alias: phoenix
135 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 | END OF TERMS AND CONDITIONS
178 |
179 | APPENDIX: How to apply the Apache License to your work.
180 |
181 | To apply the Apache License to your work, attach the following
182 | boilerplate notice, with the fields enclosed by brackets "[]"
183 | replaced with your own identifying information. (Don't include
184 | the brackets!) The text should be enclosed in the appropriate
185 | comment syntax for the file format. We also recommend that a
186 | file or class name and description of purpose be included on the
187 | same "printed page" as the copyright notice for easier
188 | identification within third-party archives.
189 |
190 | Copyright [yyyy] [name of copyright owner]
191 |
192 | Licensed under the Apache License, Version 2.0 (the "License");
193 | you may not use this file except in compliance with the License.
194 | You may obtain a copy of the License at
195 |
196 | http://www.apache.org/licenses/LICENSE-2.0
197 |
198 | Unless required by applicable law or agreed to in writing, software
199 | distributed under the License is distributed on an "AS IS" BASIS,
200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
201 | See the License for the specific language governing permissions and
202 | limitations under the License.
203 |
--------------------------------------------------------------------------------
/NEWS.rst:
--------------------------------------------------------------------------------
1 | Changelog
2 | =========
3 |
4 | Version 0.7
5 | -----------
6 |
7 | - Added DictCursor for easier access to columns by their names.
8 | - Support for Phoenix versions from 4.8 to 4.11.
9 |
10 | Version 0.6
11 | -----------
12 |
13 | - Fixed result fetching when using a query with parameters.
14 | - Support for Phoenix 4.9.
15 |
16 | Version 0.5
17 | -----------
18 |
19 | - Added support for Python 3.
20 | - Switched from the JSON serialization to Protocol Buffers, improved compatibility with Phoenix 4.8.
21 | - Phoenix 4.6 and older are no longer supported.
22 |
23 | Version 0.4
24 | -----------
25 |
26 | - Fixes for the final version of Phoenix 4.7.
27 |
28 | Version 0.3
29 | -----------
30 |
31 | - Compatible with Phoenix 4.7.
32 |
33 | Version 0.2
34 | -----------
35 |
36 | - Added (configurable) retry on connection errors.
37 | - Added Vagrantfile for easier testing.
38 | - Compatible with Phoenix 4.6.
39 |
40 | Version 0.1
41 | -----------
42 |
43 | - Initial release.
44 | - Compatible with Phoenix 4.4.
45 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | Phoenix database adapter for Python
2 | ===================================
3 |
4 | .. image:: https://code.oxygene.sk/lukas/python-phoenixdb/badges/master/pipeline.svg
5 | :target: https://code.oxygene.sk/lukas/python-phoenixdb/commits/master
6 | :alt: Build Status
7 |
8 | .. image:: https://readthedocs.org/projects/python-phoenixdb/badge/?version=latest
9 | :target: http://python-phoenixdb.readthedocs.io/en/latest/?badge=latest
10 | :alt: Documentation Status
11 |
12 | ``phoenixdb`` is a Python library for accessing the
13 | `Phoenix SQL database <https://phoenix.apache.org/>`_
14 | using the
15 | `remote query server <https://phoenix.apache.org/server.html>`_.
16 | The library implements the
17 | standard `DB API 2.0 <https://www.python.org/dev/peps/pep-0249/>`_ interface,
18 | which should be familiar to most Python programmers.
19 |
20 | Installation
21 | ------------
22 |
23 | The easiest way to install the library is using `pip <https://pip.pypa.io/>`_::
24 |
25 | pip install phoenixdb
26 |
27 | You can also download the source code from `GitHub <https://github.com/lalinsky/python-phoenixdb>`_,
28 | extract the archive and then install it manually::
29 |
30 | cd /path/to/python-phoenix-x.y.z/
31 | python setup.py install
32 |
33 | Usage
34 | -----
35 |
36 | The library implements the standard DB API 2.0 interface, so it can be
37 | used the same way you would use any other SQL database from Python, for example::
38 |
39 | import phoenixdb
40 | import phoenixdb.cursor
41 |
42 | database_url = 'http://localhost:8765/'
43 | conn = phoenixdb.connect(database_url, autocommit=True)
44 |
45 | cursor = conn.cursor()
46 | cursor.execute("CREATE TABLE users (id INTEGER PRIMARY KEY, username VARCHAR)")
47 | cursor.execute("UPSERT INTO users VALUES (?, ?)", (1, 'admin'))
48 | cursor.execute("SELECT * FROM users")
49 | print(cursor.fetchall())
50 |
51 | cursor = conn.cursor(cursor_factory=phoenixdb.cursor.DictCursor)
52 | cursor.execute("SELECT * FROM users WHERE id=1")
53 | print(cursor.fetchone()['USERNAME'])
54 |
55 |
56 | Setting up a development environment
57 | ------------------------------------
58 |
59 | If you want to quickly try out the included examples, you can set up a
60 | local `virtualenv <https://virtualenv.pypa.io/>`_ with all the
61 | necessary requirements::
62 |
63 | virtualenv e
64 | source e/bin/activate
65 | pip install -r requirements.txt
66 | python setup.py develop
67 |
68 | To create or update the Avatica protobuf classes, change the tag in ``gen-protobuf.sh``
69 | and run the script.
70 |
71 | If you need a Phoenix query server for experimenting, you can get one running
72 | quickly using `Docker <https://www.docker.com/>`_::
73 |
74 | docker-compose up
75 |
76 | Or if you need an older version of Phoenix::
77 |
78 | PHOENIX_VERSION=4.9 docker-compose up
79 |
80 | Interactive SQL shell
81 | ---------------------
82 |
83 | There is a Python-based interactive shell included in the examples folder, which can be
84 | used to connect to Phoenix and execute queries::
85 |
86 | ./examples/shell.py http://localhost:8765/
87 | db=> CREATE TABLE test (id INTEGER PRIMARY KEY, name VARCHAR);
88 | no rows affected (1.363 seconds)
89 | db=> UPSERT INTO test (id, name) VALUES (1, 'Lukas');
90 | 1 row affected (0.004 seconds)
91 | db=> SELECT * FROM test;
92 | +------+-------+
93 | | ID | NAME |
94 | +======+=======+
95 | | 1 | Lukas |
96 | +------+-------+
97 | 1 row selected (0.019 seconds)
98 |
99 | Running the test suite
100 | ----------------------
101 |
102 | The library comes with a test suite for testing Python DB API 2.0 compliance and
103 | various Phoenix-specific features. In order to run the test suite, you need a
104 | working Phoenix database and set the ``PHOENIXDB_TEST_DB_URL`` environment variable::
105 |
106 | export PHOENIXDB_TEST_DB_URL='http://localhost:8765/'
107 | nosetests
108 |
109 | Commits to the master branch are automatically tested against all supported versions of Phoenix.
110 | You can see the results `here <https://code.oxygene.sk/lukas/python-phoenixdb/pipelines>`_.
111 |
112 | Known issues
113 | ------------
114 |
115 | - You can only use the library in autocommit mode. The native Java Phoenix library also implements batched upserts, which can be committed at once, but this is not exposed over the remote server.
116 | (`CALCITE-767 <https://issues.apache.org/jira/browse/CALCITE-767>`_)
117 | - TIME and DATE columns in Phoenix are stored as full timestamps with a millisecond accuracy,
118 | but the remote protocol only exposes the time (hour/minute/second) or date (year/month/day)
119 | parts of the columns. (`CALCITE-797 <https://issues.apache.org/jira/browse/CALCITE-797>`_, `CALCITE-798 <https://issues.apache.org/jira/browse/CALCITE-798>`_)
120 | - TIMESTAMP columns in Phoenix are stored with a nanosecond accuracy, but the remote protocol truncates them to milliseconds. (`CALCITE-796 <https://issues.apache.org/jira/browse/CALCITE-796>`_)
121 | - ARRAY columns are not supported.
122 | (`CALCITE-1050 <https://issues.apache.org/jira/browse/CALCITE-1050>`_, `PHOENIX-2585 <https://issues.apache.org/jira/browse/PHOENIX-2585>`_)
123 |
--------------------------------------------------------------------------------
/RELEASING.rst:
--------------------------------------------------------------------------------
1 | Releasing a new version
2 | =======================
3 |
4 | Change the version number in ``setup.py`` and ``NEWS.rst``.
5 |
6 | Commit the changes and tag the repository::
7 |
8 | git tag -s vX.Y
9 |
10 | Upload the package to PyPI::
11 |
12 | python setup.py clean sdist upload
13 |
--------------------------------------------------------------------------------
/ci/build-env/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:xenial
2 |
3 | RUN apt-get update && \
4 | DEBIAN_FRONTEND=noninteractive apt-get install -y python-dev python3-dev tox
5 |
6 | RUN apt-get update && \
7 | DEBIAN_FRONTEND=noninteractive apt-get install -y git
8 |
--------------------------------------------------------------------------------
/ci/phoenix/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM openjdk:8
2 |
3 | ARG HBASE_VERSION
4 | ARG HBASE_DIR
5 | ARG PHOENIX_VERSION
6 | ARG PHOENIX_NAME=apache-phoenix
7 |
8 | ENV HBASE_URL https://archive.apache.org/dist/hbase/$HBASE_DIR/hbase-$HBASE_VERSION-bin.tar.gz
9 |
10 | RUN wget --no-verbose -O hbase.tar.gz "$HBASE_URL" && \
11 | mkdir /opt/hbase && \
12 | tar xf hbase.tar.gz --strip-components=1 -C /opt/hbase && \
13 | rm hbase.tar.gz
14 |
15 | ENV PHOENIX_URL https://archive.apache.org/dist/phoenix/apache-phoenix-$PHOENIX_VERSION/bin/apache-phoenix-$PHOENIX_VERSION-bin.tar.gz
16 |
17 | RUN wget --no-verbose -O phoenix.tar.gz "$PHOENIX_URL" && \
18 | mkdir /opt/phoenix && \
19 | tar xf phoenix.tar.gz --strip-components=1 -C /opt/phoenix && \
20 | rm phoenix.tar.gz
21 |
22 | RUN ln -sv /opt/phoenix/phoenix-*-server.jar /opt/hbase/lib/
23 |
24 | ADD hbase-site.xml /opt/hbase/conf/hbase-site.xml
25 |
26 | ENV HBASE_CONF_DIR /opt/hbase/conf
27 | ENV HBASE_CP /opt/hbase/lib
28 | ENV HBASE_HOME /opt/hbase
29 |
30 | EXPOSE 8765
31 |
32 | COPY docker-entrypoint.sh /usr/local/bin/
33 | ENTRYPOINT ["docker-entrypoint.sh"]
34 |
--------------------------------------------------------------------------------
/ci/phoenix/docker-entrypoint.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Container entrypoint: run an HBase master and the Phoenix query server
# side by side, then block until they exit. If either child dies (SIGCHLD)
# or the container is stopped (SIGINT/SIGTERM), tear down whatever is
# still running so the container terminates cleanly.

children=()

/opt/hbase/bin/hbase-daemon.sh foreground_start master &
children+=($!)

/opt/phoenix/bin/queryserver.py &
children+=($!)

# Signal every tracked child that is still alive. `ps -o pid=` narrows the
# list to PIDs that still exist, so we never `kill` an already-reaped child.
cleanup() {
    [ ${#children[@]} -eq 0 ] && return
    children=($(ps -o pid= -p "${children[@]}"))
    if [ ${#children[@]} -gt 0 ]; then
        kill "${children[@]}"
    fi
}

trap cleanup SIGCHLD SIGINT SIGTERM

wait
--------------------------------------------------------------------------------
/ci/phoenix/hbase-site.xml:
--------------------------------------------------------------------------------
1 | <?xml version="1.0"?>
2 | <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
3 | <configuration>
4 |     <property>
5 |         <name>hbase.regionserver.wal.codec</name>
6 |         <value>org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec</value>
7 |     </property>
8 |     <property>
9 |         <name>phoenix.schema.isNamespaceMappingEnabled</name>
10 |         <value>true</value>
11 |     </property>
12 | </configuration>
13 |
--------------------------------------------------------------------------------
/doc/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = _build
9 |
10 | # User-friendly check for sphinx-build
11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
13 | endif
14 |
15 | # Internal variables.
16 | PAPEROPT_a4 = -D latex_paper_size=a4
17 | PAPEROPT_letter = -D latex_paper_size=letter
18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
19 | # the i18n builder cannot share the environment and doctrees with the others
20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
21 |
22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest coverage gettext
23 |
24 | help:
25 | @echo "Please use \`make ' where is one of"
26 | @echo " html to make standalone HTML files"
27 | @echo " dirhtml to make HTML files named index.html in directories"
28 | @echo " singlehtml to make a single large HTML file"
29 | @echo " pickle to make pickle files"
30 | @echo " json to make JSON files"
31 | @echo " htmlhelp to make HTML files and a HTML help project"
32 | @echo " qthelp to make HTML files and a qthelp project"
33 | @echo " applehelp to make an Apple Help Book"
34 | @echo " devhelp to make HTML files and a Devhelp project"
35 | @echo " epub to make an epub"
36 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
37 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
38 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
39 | @echo " text to make text files"
40 | @echo " man to make manual pages"
41 | @echo " texinfo to make Texinfo files"
42 | @echo " info to make Texinfo files and run them through makeinfo"
43 | @echo " gettext to make PO message catalogs"
44 | @echo " changes to make an overview of all changed/added/deprecated items"
45 | @echo " xml to make Docutils-native XML files"
46 | @echo " pseudoxml to make pseudoxml-XML files for display purposes"
47 | @echo " linkcheck to check all external links for integrity"
48 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
49 | @echo " coverage to run coverage check of the documentation (if enabled)"
50 |
51 | clean:
52 | rm -rf $(BUILDDIR)/*
53 |
54 | html:
55 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
56 | @echo
57 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
58 |
59 | dirhtml:
60 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
61 | @echo
62 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
63 |
64 | singlehtml:
65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
66 | @echo
67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
68 |
69 | pickle:
70 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
71 | @echo
72 | @echo "Build finished; now you can process the pickle files."
73 |
74 | json:
75 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
76 | @echo
77 | @echo "Build finished; now you can process the JSON files."
78 |
79 | htmlhelp:
80 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
81 | @echo
82 | @echo "Build finished; now you can run HTML Help Workshop with the" \
83 | ".hhp project file in $(BUILDDIR)/htmlhelp."
84 |
85 | qthelp:
86 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
87 | @echo
88 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \
89 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
90 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/phoenixdb.qhcp"
91 | @echo "To view the help file:"
92 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/phoenixdb.qhc"
93 |
94 | applehelp:
95 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp
96 | @echo
97 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp."
98 | @echo "N.B. You won't be able to view it unless you put it in" \
99 | "~/Library/Documentation/Help or install it in your application" \
100 | "bundle."
101 |
102 | devhelp:
103 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
104 | @echo
105 | @echo "Build finished."
106 | @echo "To view the help file:"
107 | @echo "# mkdir -p $$HOME/.local/share/devhelp/phoenixdb"
108 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/phoenixdb"
109 | @echo "# devhelp"
110 |
111 | epub:
112 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
113 | @echo
114 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
115 |
116 | latex:
117 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
118 | @echo
119 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
120 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
121 | "(use \`make latexpdf' here to do that automatically)."
122 |
123 | latexpdf:
124 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
125 | @echo "Running LaTeX files through pdflatex..."
126 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
127 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
128 |
129 | latexpdfja:
130 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
131 | @echo "Running LaTeX files through platex and dvipdfmx..."
132 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
133 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
134 |
135 | text:
136 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
137 | @echo
138 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
139 |
140 | man:
141 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
142 | @echo
143 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
144 |
145 | texinfo:
146 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
147 | @echo
148 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
149 | @echo "Run \`make' in that directory to run these through makeinfo" \
150 | "(use \`make info' here to do that automatically)."
151 |
# Generate Texinfo sources and build Info files via makeinfo.
info:
	$(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
	@echo "Running Texinfo files through makeinfo..."
	$(MAKE) -C $(BUILDDIR)/texinfo info
	@echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
157 |
# Extract translatable messages into gettext catalogs.
gettext:
	$(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
	@echo
	@echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."

# Summarize entries flagged with versionadded/versionchanged/deprecated.
changes:
	$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
	@echo
	@echo "The overview file is in $(BUILDDIR)/changes."

# Check all external links for integrity.
linkcheck:
	$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
	@echo
	@echo "Link check complete; look for any errors in the above output " \
	      "or in $(BUILDDIR)/linkcheck/output.txt."

# Run doctest snippets embedded in the documentation.
doctest:
	$(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
	@echo "Testing of doctests in the sources finished, look at the " \
	      "results in $(BUILDDIR)/doctest/output.txt."

# Report documentation coverage of the code.
coverage:
	$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
	@echo "Testing of coverage in the sources finished, look at the " \
	      "results in $(BUILDDIR)/coverage/python.txt."

# Build Docutils-native XML output.
xml:
	$(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
	@echo
	@echo "Build finished. The XML files are in $(BUILDDIR)/xml."

# Build pseudo-XML output for debugging the doctree.
pseudoxml:
	$(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
	@echo
	@echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
193 |
--------------------------------------------------------------------------------
/doc/api.rst:
--------------------------------------------------------------------------------
1 | API Reference
2 | =============
3 |
4 | phoenixdb module
5 | ----------------
6 |
7 | .. automodule:: phoenixdb
8 | :members:
9 | :undoc-members:
10 |
11 | phoenixdb.connection module
12 | ---------------------------
13 |
14 | .. automodule:: phoenixdb.connection
15 | :members:
16 | :undoc-members:
17 |
18 | phoenixdb.cursor module
19 | -----------------------
20 |
21 | .. automodule:: phoenixdb.cursor
22 | :members:
23 | :undoc-members:
24 |
25 | phoenixdb.avatica module
26 | ------------------------
27 |
28 | .. automodule:: phoenixdb.avatica
29 | :members:
30 | :undoc-members:
31 |
--------------------------------------------------------------------------------
/doc/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # phoenixdb documentation build configuration file, created by
4 | # sphinx-quickstart on Sun Jun 28 18:07:35 2015.
5 | #
6 | # This file is execfile()d with the current directory set to its
7 | # containing dir.
8 | #
9 | # Note that not all possible configuration values are present in this
10 | # autogenerated file.
11 | #
12 | # All configuration values have a default; values that are commented out
13 | # serve to show the default.
14 |
import sys
import os
import shlex  # NOTE(review): sphinx-quickstart boilerplate, unused in this config file.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../phoenixdb'))
23 |
24 | # -- General configuration ------------------------------------------------
25 |
26 | # If your documentation needs a minimal Sphinx version, state it here.
27 | #needs_sphinx = '1.0'
28 |
29 | # Add any Sphinx extension module names here, as strings. They can be
30 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
31 | # ones.
32 | extensions = [
33 | 'sphinx.ext.autodoc',
34 | 'sphinx.ext.doctest',
35 | 'sphinx.ext.intersphinx',
36 | ]
37 |
38 | # Add any paths that contain templates here, relative to this directory.
39 | templates_path = ['_templates']
40 |
41 | # The suffix(es) of source filenames.
42 | # You can specify multiple suffix as a list of string:
43 | # source_suffix = ['.rst', '.md']
44 | source_suffix = '.rst'
45 |
46 | # The encoding of source files.
47 | source_encoding = 'utf-8-sig'
48 |
49 | # The master toctree document.
50 | master_doc = 'index'
51 |
52 | # General information about the project.
53 | project = u'phoenixdb'
54 | copyright = u'2015, Lukas Lalinsky'
55 | author = u'Lukas Lalinsky'
56 |
57 | # The version info for the project you're documenting, acts as replacement for
58 | # |version| and |release|, also used in various other places throughout the
59 | # built documents.
60 |
61 | # The language for content autogenerated by Sphinx. Refer to documentation
62 | # for a list of supported languages.
63 | #
64 | # This is also used if you do content translation via gettext catalogs.
65 | # Usually you set "language" from the command line for these cases.
66 | language = None
67 |
68 | # There are two options for replacing |today|: either, you set today to some
69 | # non-false value, then it is used:
70 | #today = ''
71 | # Else, today_fmt is used as the format for a strftime call.
72 | #today_fmt = '%B %d, %Y'
73 |
74 | # List of patterns, relative to source directory, that match files and
75 | # directories to ignore when looking for source files.
76 | exclude_patterns = ['_build']
77 |
78 | # The reST default role (used for this markup: `text`) to use for all
79 | # documents.
80 | #default_role = None
81 |
82 | # If true, '()' will be appended to :func: etc. cross-reference text.
83 | #add_function_parentheses = True
84 |
85 | # If true, the current module name will be prepended to all description
86 | # unit titles (such as .. function::).
87 | #add_module_names = True
88 |
89 | # If true, sectionauthor and moduleauthor directives will be shown in the
90 | # output. They are ignored by default.
91 | #show_authors = False
92 |
93 | # The name of the Pygments (syntax highlighting) style to use.
94 | pygments_style = 'sphinx'
95 |
96 | # A list of ignored prefixes for module index sorting.
97 | #modindex_common_prefix = []
98 |
99 | # If true, keep warnings as "system message" paragraphs in the built documents.
100 | #keep_warnings = False
101 |
102 | # If true, `todo` and `todoList` produce output, else they produce nothing.
103 | todo_include_todos = False
104 |
105 |
106 | # -- Options for HTML output ----------------------------------------------
107 |
108 | # The theme to use for HTML and HTML Help pages. See the documentation for
109 | # a list of builtin themes.
110 | html_theme = 'classic'
111 |
112 | # Theme options are theme-specific and customize the look and feel of a theme
113 | # further. For a list of options available for each theme, see the
114 | # documentation.
115 | #html_theme_options = {}
116 |
117 | # Add any paths that contain custom themes here, relative to this directory.
118 | #html_theme_path = []
119 |
120 | # The name for this set of Sphinx documents. If None, it defaults to
121 | # " v documentation".
122 | #html_title = None
123 |
124 | # A shorter title for the navigation bar. Default is the same as html_title.
125 | #html_short_title = None
126 |
127 | # The name of an image file (relative to this directory) to place at the top
128 | # of the sidebar.
129 | #html_logo = None
130 |
131 | # The name of an image file (within the static path) to use as favicon of the
132 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
133 | # pixels large.
134 | #html_favicon = None
135 |
136 | # Add any paths that contain custom static files (such as style sheets) here,
137 | # relative to this directory. They are copied after the builtin static files,
138 | # so a file named "default.css" will overwrite the builtin "default.css".
139 | html_static_path = ['_static']
140 |
141 | # Add any extra paths that contain custom files (such as robots.txt or
142 | # .htaccess) here, relative to this directory. These files are copied
143 | # directly to the root of the documentation.
144 | #html_extra_path = []
145 |
146 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
147 | # using the given strftime format.
148 | #html_last_updated_fmt = '%b %d, %Y'
149 |
150 | # If true, SmartyPants will be used to convert quotes and dashes to
151 | # typographically correct entities.
152 | #html_use_smartypants = True
153 |
154 | # Custom sidebar templates, maps document names to template names.
155 | #html_sidebars = {}
156 |
157 | # Additional templates that should be rendered to pages, maps page names to
158 | # template names.
159 | #html_additional_pages = {}
160 |
161 | # If false, no module index is generated.
162 | #html_domain_indices = True
163 |
164 | # If false, no index is generated.
165 | #html_use_index = True
166 |
167 | # If true, the index is split into individual pages for each letter.
168 | #html_split_index = False
169 |
170 | # If true, links to the reST sources are added to the pages.
171 | html_show_sourcelink = False
172 |
173 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
174 | #html_show_sphinx = True
175 |
176 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
177 | #html_show_copyright = True
178 |
179 | # If true, an OpenSearch description file will be output, and all pages will
180 | # contain a tag referring to it. The value of this option must be the
181 | # base URL from which the finished HTML is served.
182 | #html_use_opensearch = ''
183 |
184 | # This is the file name suffix for HTML files (e.g. ".xhtml").
185 | #html_file_suffix = None
186 |
187 | # Language to be used for generating the HTML full-text search index.
188 | # Sphinx supports the following languages:
189 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
190 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
191 | #html_search_language = 'en'
192 |
193 | # A dictionary with options for the search language support, empty by default.
194 | # Now only 'ja' uses this config value
195 | #html_search_options = {'type': 'default'}
196 |
197 | # The name of a javascript file (relative to the configuration directory) that
198 | # implements a search results scorer. If empty, the default will be used.
199 | #html_search_scorer = 'scorer.js'
200 |
201 | # Output file base name for HTML help builder.
202 | htmlhelp_basename = 'phoenixdbdoc'
203 |
204 | # -- Options for LaTeX output ---------------------------------------------
205 |
206 | #latex_elements = {
207 | # The paper size ('letterpaper' or 'a4paper').
208 | #'papersize': 'letterpaper',
209 |
210 | # The font size ('10pt', '11pt' or '12pt').
211 | #'pointsize': '10pt',
212 |
213 | # Additional stuff for the LaTeX preamble.
214 | #'preamble': '',
215 |
216 | # Latex figure (float) alignment
217 | #'figure_align': 'htbp',
218 | #}
219 |
220 | # Grouping the document tree into LaTeX files. List of tuples
221 | # (source start file, target name, title,
222 | # author, documentclass [howto, manual, or own class]).
223 | #latex_documents = [
224 | # (master_doc, 'phoenixdb.tex', u'phoenixdb Documentation',
225 | # u'Lukas Lalinsky', 'manual'),
226 | #]
227 |
228 | # The name of an image file (relative to this directory) to place at the top of
229 | # the title page.
230 | #latex_logo = None
231 |
232 | # For "manual" documents, if this is true, then toplevel headings are parts,
233 | # not chapters.
234 | #latex_use_parts = False
235 |
236 | # If true, show page references after internal links.
237 | #latex_show_pagerefs = False
238 |
239 | # If true, show URL addresses after external links.
240 | #latex_show_urls = False
241 |
242 | # Documents to append as an appendix to all manuals.
243 | #latex_appendices = []
244 |
245 | # If false, no module index is generated.
246 | #latex_domain_indices = True
247 |
248 |
249 | # -- Options for manual page output ---------------------------------------
250 |
251 | # One entry per manual page. List of tuples
252 | # (source start file, name, description, authors, manual section).
253 | man_pages = [
254 | (master_doc, 'phoenixdb', u'phoenixdb Documentation',
255 | [author], 1)
256 | ]
257 |
258 | # If true, show URL addresses after external links.
259 | #man_show_urls = False
260 |
261 |
262 | # -- Options for Texinfo output -------------------------------------------
263 |
264 | # Grouping the document tree into Texinfo files. List of tuples
265 | # (source start file, target name, title, author,
266 | # dir menu entry, description, category)
267 | texinfo_documents = [
268 | (master_doc, 'phoenixdb', u'phoenixdb Documentation',
269 | author, 'phoenixdb', 'One line description of project.',
270 | 'Miscellaneous'),
271 | ]
272 |
273 | # Documents to append as an appendix to all manuals.
274 | #texinfo_appendices = []
275 |
276 | # If false, no module index is generated.
277 | #texinfo_domain_indices = True
278 |
279 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
280 | #texinfo_show_urls = 'footnote'
281 |
282 | # If true, do not generate a @detailmenu in the "Top" node's menu.
283 | #texinfo_no_detailmenu = False
284 |
285 |
286 | # Example configuration for intersphinx: refer to the Python standard library.
287 | intersphinx_mapping = {'https://docs.python.org/': None}
288 |
--------------------------------------------------------------------------------
/doc/index.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../README.rst
2 |
3 | API Reference
4 | -------------
5 |
6 | .. toctree::
7 | :maxdepth: 2
8 |
9 | api
10 |
11 | Changelog
12 | -------------
13 |
14 | .. toctree::
15 | :maxdepth: 2
16 |
17 | versions
18 |
19 | Indices and tables
20 | ==================
21 |
22 | * :ref:`genindex`
23 | * :ref:`modindex`
24 | * :ref:`search`
25 |
26 |
..
28 |
--------------------------------------------------------------------------------
/doc/versions.rst:
--------------------------------------------------------------------------------
1 | .. include:: ../NEWS.rst
2 |
..
4 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
# Local test environment: a single Phoenix query server (Avatica over HTTP).
version: "3"
services:
  phoenix:
    # PHOENIX_VERSION selects the image tag; defaults to 4.11.
    image: docker.oxygene.sk/lukas/python-phoenixdb/phoenix:${PHOENIX_VERSION:-4.11}
    ports:
      # Expose the Avatica HTTP endpoint on localhost only.
      - "127.0.0.1:8765:8765"
7 |
--------------------------------------------------------------------------------
/examples/basic.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""Minimal example: create a table, upsert two rows, and read them back."""

import phoenixdb

with phoenixdb.connect('http://localhost:8765/', autocommit=True) as connection:
    with connection.cursor() as cursor:
        cursor.execute("DROP TABLE IF EXISTS test")
        cursor.execute("CREATE TABLE test (id INTEGER PRIMARY KEY, text VARCHAR)")
        # executemany runs the same statement once per parameter list.
        cursor.executemany("UPSERT INTO test VALUES (?, ?)", [[1, 'hello'], [2, 'world']])
        cursor.execute("SELECT * FROM test ORDER BY id")
        for row in cursor:
            print(row)
13 |
--------------------------------------------------------------------------------
/examples/shell.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
"""Interactive SQL shell for a Phoenix query server, built on sqlline."""

import logging
import argparse
import sqlline

parser = argparse.ArgumentParser()
parser.add_argument('--debug', '-d', action='store_true')
parser.add_argument('url')
args = parser.parse_args()

if args.debug:
    logging.basicConfig(level=logging.DEBUG)

# Bind the shell instance to a distinct name so it does not shadow the
# imported ``sqlline`` module (the original code reused the module name).
with sqlline.SqlLine() as shell:
    shell.connect('phoenixdb', args.url)
    shell.connection.autocommit = True
    shell.run()
19 |
--------------------------------------------------------------------------------
/gen-protobuf.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Regenerates the Python protobuf bindings in phoenixdb/avatica/proto
# from the Apache Calcite Avatica release sources.

AVATICA_VER=rel/avatica-1.10.0

set -e

# Start from a clean extraction of the Avatica release tarball.
rm -rf avatica-tmp

mkdir avatica-tmp
cd avatica-tmp
wget -O avatica.tar.gz https://github.com/apache/calcite-avatica/archive/$AVATICA_VER.tar.gz
tar -x --strip-components=1 -f avatica.tar.gz

cd ..
# Regenerate the *_pb2.py modules from the .proto definitions.
rm -f phoenixdb/avatica/proto/*_pb2.py
protoc --proto_path=avatica-tmp/core/src/main/protobuf/ --python_out=phoenixdb/avatica/proto avatica-tmp/core/src/main/protobuf/*.proto
# protoc emits absolute imports; rewrite them as package-relative imports
# so they work under Python 3.
sed -i 's/import common_pb2/from . import common_pb2/' phoenixdb/avatica/proto/*_pb2.py

rm -rf avatica-tmp
20 |
--------------------------------------------------------------------------------
/phoenixdb/__init__.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Lukas Lalinsky
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | from phoenixdb import errors, types
16 | from phoenixdb.avatica import AvaticaClient
17 | from phoenixdb.connection import Connection
18 | from phoenixdb.errors import * # noqa: F401,F403
19 | from phoenixdb.types import * # noqa: F401,F403
20 |
21 | __all__ = ['connect', 'apilevel', 'threadsafety', 'paramstyle'] + types.__all__ + errors.__all__
22 |
23 |
apilevel = "2.0"
"""
This module supports the `DB API 2.0 interface <https://www.python.org/dev/peps/pep-0249/>`_.
"""

threadsafety = 1
"""
Multiple threads can share the module, but neither connections nor cursors.
"""

paramstyle = 'qmark'
"""
Parametrized queries should use the question mark as a parameter placeholder.

For example::

    cursor.execute("SELECT * FROM table WHERE id = ?", [my_id])
"""
42 |
43 |
def connect(url, max_retries=None, **kwargs):
    """Connects to a Phoenix query server and returns a new connection.

    :param url:
        URL to the Phoenix query server, e.g. ``http://localhost:8765/``

    :param autocommit:
        Switch the connection to autocommit mode.

    :param readonly:
        Switch the connection to readonly mode.

    :param max_retries:
        The maximum number of retries in case there is a connection error.

    :param cursor_factory:
        If specified, the connection's :attr:`~phoenixdb.connection.Connection.cursor_factory` is set to it.

    :returns:
        :class:`~phoenixdb.connection.Connection` object.
    """
    avatica_client = AvaticaClient(url, max_retries=max_retries)
    avatica_client.connect()
    return Connection(avatica_client, **kwargs)
68 |
--------------------------------------------------------------------------------
/phoenixdb/avatica/__init__.py:
--------------------------------------------------------------------------------
1 | from .client import AvaticaClient # noqa: F401
2 |
--------------------------------------------------------------------------------
/phoenixdb/avatica/client.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Lukas Lalinsky
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | """Implementation of the JSON-over-HTTP RPC protocol used by Avatica."""
16 |
17 | import re
18 | import socket
19 | import pprint
20 | import math
21 | import logging
22 | import time
23 | from phoenixdb import errors
24 | from phoenixdb.avatica.proto import requests_pb2, common_pb2, responses_pb2
25 |
26 | try:
27 | import httplib
28 | except ImportError:
29 | import http.client as httplib
30 |
31 | try:
32 | import urlparse
33 | except ImportError:
34 | import urllib.parse as urlparse
35 |
36 | try:
37 | from HTMLParser import HTMLParser
38 | except ImportError:
39 | from html.parser import HTMLParser
40 |
41 | __all__ = ['AvaticaClient']
42 |
43 | logger = logging.getLogger(__name__)
44 |
45 |
class JettyErrorPageParser(HTMLParser):
    """Extracts the title and error message from a Jetty HTML error page.

    ``path`` tracks the stack of currently open tags while parsing. Text in
    the ``<h2>`` directly under ``<body>`` is collected into ``title``; text
    in a ``<pre>`` nested in a ``<p>`` is collected into ``message``.
    """

    def __init__(self):
        HTMLParser.__init__(self)
        self.path = []
        self.title = []
        self.message = []

    def handle_starttag(self, tag, attrs):
        self.path.append(tag)

    def handle_endtag(self, tag):
        self.path.pop()

    def handle_data(self, data):
        # Only text nested somewhere inside <html><body> is of interest.
        if len(self.path) <= 2 or self.path[:2] != ['html', 'body']:
            return
        if self.path[2:] == ['h2']:
            self.title.append(data.strip())
        elif self.path[2:] == ['p', 'pre']:
            self.message.append(data.strip())
66 |
67 |
def parse_url(url):
    """Parses *url*, defaulting bare host names to http on port 8765."""
    parsed = urlparse.urlparse(url)
    # A bare host name parses as a path with no scheme/netloc; rebuild it
    # into a full URL with the default scheme and port.
    if parsed.scheme or parsed.netloc or not parsed.path:
        return parsed
    host = parsed.path
    if ':' not in host:
        host = '{}:8765'.format(host)
    return urlparse.ParseResult('http', host, '/', '', '', '')
76 |
77 |
# Defined in phoenix-core/src/main/java/org/apache/phoenix/exception/SQLExceptionCode.java
# NOTE: ordering matters — the first matching prefix wins, so the specific
# '22018' entry must stay before the generic '22' class.
SQLSTATE_ERROR_CLASSES = [
    ('08', errors.OperationalError),  # Connection Exception
    ('22018', errors.IntegrityError),  # Constraint violation.
    ('22', errors.DataError),  # Data Exception
    ('23', errors.IntegrityError),  # Constraint Violation
    ('24', errors.InternalError),  # Invalid Cursor State
    ('25', errors.InternalError),  # Invalid Transaction State
    ('42', errors.ProgrammingError),  # Syntax Error or Access Rule Violation
    ('XLC', errors.OperationalError),  # Execution exceptions
    ('INT', errors.InternalError),  # Phoenix internal error
]

# Relevant properties as defined by https://calcite.apache.org/avatica/docs/client_reference.html
OPEN_CONNECTION_PROPERTIES = (
    'user',  # User for the database connection
    'password',  # Password for the user
)
96 |
97 |
def raise_sql_error(code, sqlstate, message):
    """Raises the DBAPI error class mapped to *sqlstate*, if any prefix matches.

    Returns silently when no prefix in SQLSTATE_ERROR_CLASSES matches.
    """
    matching = (cls for prefix, cls in SQLSTATE_ERROR_CLASSES if sqlstate.startswith(prefix))
    error_class = next(matching, None)
    if error_class is not None:
        raise error_class(message, code, sqlstate)
102 |
103 |
def parse_and_raise_sql_error(message):
    """Scans *message* for a Phoenix error description and raises if found."""
    matches = re.findall(r'(?:([^ ]+): )?ERROR (\d+) \(([0-9A-Z]{5})\): (.*?) ->', message)
    if matches:
        _, code, sqlstate, error_text = matches[0]
        raise_sql_error(int(code), sqlstate, error_text)
109 |
110 |
def parse_error_page(html):
    """Parses a Jetty HTML error page and raises the matching DBAPI error.

    Only pages titled exactly ``HTTP ERROR: 500`` raise; anything else
    falls through without raising.
    """
    parser = JettyErrorPageParser()
    parser.feed(html)
    if parser.title == ['HTTP ERROR: 500']:
        message = ' '.join(parser.message).strip()
        # Prefer a specific SQL error parsed from the message text; fall back
        # to a generic InternalError when the text is not in the known format.
        parse_and_raise_sql_error(message)
        raise errors.InternalError(message)
118 |
119 |
def parse_error_protobuf(text):
    """Parses a protobuf-serialized ErrorResponse and raises a DBAPI error.

    Always raises: either a specific error mapped from the message text or
    SQLSTATE, or a generic InternalError as the last resort.
    """
    message = common_pb2.WireMessage()
    message.ParseFromString(text)

    err = responses_pb2.ErrorResponse()
    err.ParseFromString(message.wrapped_message)

    # Try the textual Phoenix error format first, then the structured fields.
    parse_and_raise_sql_error(err.error_message)
    raise_sql_error(err.error_code, err.sql_state, err.error_message)
    raise errors.InternalError(err.error_message)
130 |
131 |
132 | class AvaticaClient(object):
133 | """Client for Avatica's RPC server.
134 |
135 | This exposes all low-level functionality that the Avatica
136 | server provides, using the native terminology. You most likely
137 | do not want to use this class directly, but rather get connect
138 | to a server using :func:`phoenixdb.connect`.
139 | """
140 |
141 | def __init__(self, url, max_retries=None):
142 | """Constructs a new client object.
143 |
144 | :param url:
145 | URL of an Avatica RPC server.
146 | """
147 | self.url = parse_url(url)
148 | self.max_retries = max_retries if max_retries is not None else 3
149 | self.connection = None
150 |
    def connect(self):
        """Opens a HTTP connection to the RPC server."""
        logger.debug("Opening connection to %s:%s", self.url.hostname, self.url.port)
        try:
            self.connection = httplib.HTTPConnection(self.url.hostname, self.url.port)
            self.connection.connect()
        except (httplib.HTTPException, socket.error) as e:
            # Wrap transport-level failures in the DBAPI InterfaceError.
            raise errors.InterfaceError('Unable to connect to the specified service', e)
159 |
160 | def close(self):
161 | """Closes the HTTP connection to the RPC server."""
162 | if self.connection is not None:
163 | logger.debug("Closing connection to %s:%s", self.url.hostname, self.url.port)
164 | try:
165 | self.connection.close()
166 | except httplib.HTTPException:
167 | logger.warning("Error while closing connection", exc_info=True)
168 | self.connection = None
169 |
    def _post_request(self, body, headers):
        """POSTs *body* to the RPC endpoint, retrying transient failures.

        Retries up to ``self.max_retries`` times on HTTP protocol errors
        (reconnecting first) and on HTTP 503 (Service Unavailable) responses.

        :returns: The ``httplib`` response object (body not yet read).
        """
        retry_count = self.max_retries
        while True:
            logger.debug("POST %s %r %r", self.url.path, body, headers)
            try:
                self.connection.request('POST', self.url.path, body=body, headers=headers)
                response = self.connection.getresponse()
            except httplib.HTTPException as e:
                if retry_count > 0:
                    # NOTE(review): exp(-retry_count) grows as retries are
                    # consumed (e.g. 0.05s, 0.14s, 0.37s for 3 retries), so
                    # this is an increasing backoff despite its appearance.
                    delay = math.exp(-retry_count)
                    logger.debug("HTTP protocol error, will retry in %s seconds...", delay, exc_info=True)
                    self.close()
                    self.connect()
                    time.sleep(delay)
                    retry_count -= 1
                    continue
                raise errors.InterfaceError('RPC request failed', cause=e)
            else:
                if response.status == httplib.SERVICE_UNAVAILABLE:
                    if retry_count > 0:
                        delay = math.exp(-retry_count)
                        logger.debug("Service unavailable, will retry in %s seconds...", delay, exc_info=True)
                        time.sleep(delay)
                        retry_count -= 1
                        continue
                return response
196 |
197 | def _apply(self, request_data, expected_response_type=None):
198 | logger.debug("Sending request\n%s", pprint.pformat(request_data))
199 |
200 | request_name = request_data.__class__.__name__
201 | message = common_pb2.WireMessage()
202 | message.name = 'org.apache.calcite.avatica.proto.Requests${}'.format(request_name)
203 | message.wrapped_message = request_data.SerializeToString()
204 | body = message.SerializeToString()
205 | headers = {'content-type': 'application/x-google-protobuf'}
206 |
207 | response = self._post_request(body, headers)
208 | response_body = response.read()
209 |
210 | if response.status != httplib.OK:
211 | logger.debug("Received response\n%s", response_body)
212 | if b'' in response_body:
213 | parse_error_page(response_body)
214 | else:
215 | # assume the response is in protobuf format
216 | parse_error_protobuf(response_body)
217 | raise errors.InterfaceError('RPC request returned invalid status code', response.status)
218 |
219 | message = common_pb2.WireMessage()
220 | message.ParseFromString(response_body)
221 |
222 | logger.debug("Received response\n%s", message)
223 |
224 | if expected_response_type is None:
225 | expected_response_type = request_name.replace('Request', 'Response')
226 |
227 | expected_response_type = 'org.apache.calcite.avatica.proto.Responses$' + expected_response_type
228 | if message.name != expected_response_type:
229 | raise errors.InterfaceError('unexpected response type "{}"'.format(message.name))
230 |
231 | return message.wrapped_message
232 |
233 | def get_catalogs(self, connection_id):
234 | request = requests_pb2.CatalogsRequest()
235 | request.connection_id = connection_id
236 | return self._apply(request)
237 |
238 | def get_schemas(self, connection_id, catalog=None, schemaPattern=None):
239 | request = requests_pb2.SchemasRequest()
240 | request.connection_id = connection_id
241 | if catalog is not None:
242 | request.catalog = catalog
243 | if schemaPattern is not None:
244 | request.schema_pattern = schemaPattern
245 | return self._apply(request)
246 |
247 | def get_tables(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):
248 | request = requests_pb2.TablesRequest()
249 | request.connection_id = connection_id
250 | if catalog is not None:
251 | request.catalog = catalog
252 | if schemaPattern is not None:
253 | request.schema_pattern = schemaPattern
254 | if tableNamePattern is not None:
255 | request.table_name_pattern = tableNamePattern
256 | if typeList is not None:
257 | request.type_list = typeList
258 | if typeList is not None:
259 | request.type_list.extend(typeList)
260 | request.has_type_list = typeList is not None
261 | return self._apply(request)
262 |
263 | def get_columns(self, connection_id, catalog=None, schemaPattern=None, tableNamePattern=None, columnNamePattern=None):
264 | request = requests_pb2.ColumnsRequest()
265 | request.connection_id = connection_id
266 | if catalog is not None:
267 | request.catalog = catalog
268 | if schemaPattern is not None:
269 | request.schema_pattern = schemaPattern
270 | if tableNamePattern is not None:
271 | request.table_name_pattern = tableNamePattern
272 | if columnNamePattern is not None:
273 | request.column_name_pattern = columnNamePattern
274 | return self._apply(request)
275 |
276 | def get_table_types(self, connection_id):
277 | request = requests_pb2.TableTypesRequest()
278 | request.connection_id = connection_id
279 | return self._apply(request)
280 |
281 | def get_type_info(self, connection_id):
282 | request = requests_pb2.TypeInfoRequest()
283 | request.connection_id = connection_id
284 | return self._apply(request)
285 |
    def connection_sync(self, connection_id, connProps=None):
        """Synchronizes connection properties with the server.

        :param connection_id:
            ID of the current connection.

        :param connProps:
            Dictionary with the properties that should be changed.

        :returns:
            A ``common_pb2.ConnectionProperties`` object.
        """
        if connProps is None:
            connProps = {}

        request = requests_pb2.ConnectionSyncRequest()
        request.connection_id = connection_id
        # The has_* flags tell the server which fields were set explicitly.
        request.conn_props.auto_commit = connProps.get('autoCommit', False)
        request.conn_props.has_auto_commit = True
        request.conn_props.read_only = connProps.get('readOnly', False)
        request.conn_props.has_read_only = True
        request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
        request.conn_props.catalog = connProps.get('catalog', '')
        request.conn_props.schema = connProps.get('schema', '')

        response_data = self._apply(request)
        response = responses_pb2.ConnectionSyncResponse()
        response.ParseFromString(response_data)
        return response.conn_props
315 |
    def open_connection(self, connection_id, info=None):
        """Opens a new connection.

        :param connection_id:
            ID of the connection to open.

        :param info:
            Optional dictionary of connection properties (e.g. user, password).
        """
        request = requests_pb2.OpenConnectionRequest()
        request.connection_id = connection_id
        if info is not None:
            # Info is a list of repeated pairs, setting a dict directly fails
            for k, v in info.items():
                request.info[k] = v

        response_data = self._apply(request)
        # The response carries no useful payload; parse it only to validate.
        response = responses_pb2.OpenConnectionResponse()
        response.ParseFromString(response_data)
332 |
333 | def close_connection(self, connection_id):
334 | """Closes a connection.
335 |
336 | :param connection_id:
337 | ID of the connection to close.
338 | """
339 | request = requests_pb2.CloseConnectionRequest()
340 | request.connection_id = connection_id
341 | self._apply(request)
342 |
343 | def create_statement(self, connection_id):
344 | """Creates a new statement.
345 |
346 | :param connection_id:
347 | ID of the current connection.
348 |
349 | :returns:
350 | New statement ID.
351 | """
352 | request = requests_pb2.CreateStatementRequest()
353 | request.connection_id = connection_id
354 |
355 | response_data = self._apply(request)
356 | response = responses_pb2.CreateStatementResponse()
357 | response.ParseFromString(response_data)
358 | return response.statement_id
359 |
360 | def close_statement(self, connection_id, statement_id):
361 | """Closes a statement.
362 |
363 | :param connection_id:
364 | ID of the current connection.
365 |
366 | :param statement_id:
367 | ID of the statement to close.
368 | """
369 | request = requests_pb2.CloseStatementRequest()
370 | request.connection_id = connection_id
371 | request.statement_id = statement_id
372 |
373 | self._apply(request)
374 |
375 | def prepare_and_execute(self, connection_id, statement_id, sql, max_rows_total=None, first_frame_max_size=None):
376 | """Prepares and immediately executes a statement.
377 |
378 | :param connection_id:
379 | ID of the current connection.
380 |
381 | :param statement_id:
382 | ID of the statement to prepare.
383 |
384 | :param sql:
385 | SQL query.
386 |
387 | :param max_rows_total:
388 | The maximum number of rows that will be allowed for this query.
389 |
390 | :param first_frame_max_size:
391 | The maximum number of rows that will be returned in the first Frame returned for this query.
392 |
393 | :returns:
394 | Result set with the signature of the prepared statement and the first frame data.
395 | """
396 | request = requests_pb2.PrepareAndExecuteRequest()
397 | request.connection_id = connection_id
398 | request.statement_id = statement_id
399 | request.sql = sql
400 | if max_rows_total is not None:
401 | request.max_rows_total = max_rows_total
402 | if first_frame_max_size is not None:
403 | request.first_frame_max_size = first_frame_max_size
404 |
405 | response_data = self._apply(request, 'ExecuteResponse')
406 | response = responses_pb2.ExecuteResponse()
407 | response.ParseFromString(response_data)
408 | return response.results
409 |
410 | def prepare(self, connection_id, sql, max_rows_total=None):
411 | """Prepares a statement.
412 |
413 | :param connection_id:
414 | ID of the current connection.
415 |
416 | :param sql:
417 | SQL query.
418 |
419 | :param max_rows_total:
420 | The maximum number of rows that will be allowed for this query.
421 |
422 | :returns:
423 | Signature of the prepared statement.
424 | """
425 | request = requests_pb2.PrepareRequest()
426 | request.connection_id = connection_id
427 | request.sql = sql
428 | if max_rows_total is not None:
429 | request.max_rows_total = max_rows_total
430 |
431 | response_data = self._apply(request)
432 | response = responses_pb2.PrepareResponse()
433 | response.ParseFromString(response_data)
434 | return response.statement
435 |
436 | def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
437 | """Returns a frame of rows.
438 |
439 | The frame describes whether there may be another frame. If there is not
440 | another frame, the current iteration is done when we have finished the
441 | rows in the this frame.
442 |
443 | :param connection_id:
444 | ID of the current connection.
445 |
446 | :param statement_id:
447 | ID of the statement to fetch rows from.
448 |
449 | :param signature:
450 | common_pb2.Signature object
451 |
452 | :param parameter_values:
453 | A list of parameter values, if statement is to be executed; otherwise ``None``.
454 |
455 | :param first_frame_max_size:
456 | The maximum number of rows that will be returned in the first Frame returned for this query.
457 |
458 | :returns:
459 | Frame data, or ``None`` if there are no more.
460 | """
461 | request = requests_pb2.ExecuteRequest()
462 | request.statementHandle.id = statement_id
463 | request.statementHandle.connection_id = connection_id
464 | request.statementHandle.signature.CopyFrom(signature)
465 | if parameter_values is not None:
466 | request.parameter_values.extend(parameter_values)
467 | request.has_parameter_values = True
468 | if first_frame_max_size is not None:
469 | request.deprecated_first_frame_max_size = first_frame_max_size
470 | request.first_frame_max_size = first_frame_max_size
471 |
472 | response_data = self._apply(request)
473 | response = responses_pb2.ExecuteResponse()
474 | response.ParseFromString(response_data)
475 | return response.results
476 |
477 | def fetch(self, connection_id, statement_id, offset=0, frame_max_size=None):
478 | """Returns a frame of rows.
479 |
480 | The frame describes whether there may be another frame. If there is not
481 | another frame, the current iteration is done when we have finished the
482 | rows in the this frame.
483 |
484 | :param connection_id:
485 | ID of the current connection.
486 |
487 | :param statement_id:
488 | ID of the statement to fetch rows from.
489 |
490 | :param offset:
491 | Zero-based offset of first row in the requested frame.
492 |
493 | :param frame_max_size:
494 | Maximum number of rows to return; negative means no limit.
495 |
496 | :returns:
497 | Frame data, or ``None`` if there are no more.
498 | """
499 | request = requests_pb2.FetchRequest()
500 | request.connection_id = connection_id
501 | request.statement_id = statement_id
502 | request.offset = offset
503 | if frame_max_size is not None:
504 | request.frame_max_size = frame_max_size
505 |
506 | response_data = self._apply(request)
507 | response = responses_pb2.FetchResponse()
508 | response.ParseFromString(response_data)
509 | return response.frame
510 |
--------------------------------------------------------------------------------
/phoenixdb/avatica/proto/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/lalinsky/python-phoenixdb/1bb34488dd530ca65f91b29ef16aa7b71f26b806/phoenixdb/avatica/proto/__init__.py
--------------------------------------------------------------------------------
/phoenixdb/avatica/proto/responses_pb2.py:
--------------------------------------------------------------------------------
1 | # Generated by the protocol buffer compiler. DO NOT EDIT!
2 | # source: responses.proto
3 |
4 | import sys
5 | _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
6 | from google.protobuf import descriptor as _descriptor
7 | from google.protobuf import message as _message
8 | from google.protobuf import reflection as _reflection
9 | from google.protobuf import symbol_database as _symbol_database
10 | from google.protobuf import descriptor_pb2
11 | # @@protoc_insertion_point(imports)
12 |
13 | _sym_db = _symbol_database.Default()
14 |
15 |
16 | from . import common_pb2 as common__pb2
17 |
18 |
19 | DESCRIPTOR = _descriptor.FileDescriptor(
20 | name='responses.proto',
21 | package='',
22 | syntax='proto3',
23 | serialized_pb=_b('\n\x0fresponses.proto\x1a\x0c\x63ommon.proto\"\xc9\x01\n\x11ResultSetResponse\x12\x15\n\rconnection_id\x18\x01 \x01(\t\x12\x14\n\x0cstatement_id\x18\x02 \x01(\r\x12\x15\n\rown_statement\x18\x03 \x01(\x08\x12\x1d\n\tsignature\x18\x04 \x01(\x0b\x32\n.Signature\x12\x1b\n\x0b\x66irst_frame\x18\x05 \x01(\x0b\x32\x06.Frame\x12\x14\n\x0cupdate_count\x18\x06 \x01(\x04\x12\x1e\n\x08metadata\x18\x07 \x01(\x0b\x32\x0c.RpcMetadata\"q\n\x0f\x45xecuteResponse\x12#\n\x07results\x18\x01 \x03(\x0b\x32\x12.ResultSetResponse\x12\x19\n\x11missing_statement\x18\x02 \x01(\x08\x12\x1e\n\x08metadata\x18\x03 \x01(\x0b\x32\x0c.RpcMetadata\"V\n\x0fPrepareResponse\x12#\n\tstatement\x18\x01 \x01(\x0b\x32\x10.StatementHandle\x12\x1e\n\x08metadata\x18\x02 \x01(\x0b\x32\x0c.RpcMetadata\"z\n\rFetchResponse\x12\x15\n\x05\x66rame\x18\x01 \x01(\x0b\x32\x06.Frame\x12\x19\n\x11missing_statement\x18\x02 \x01(\x08\x12\x17\n\x0fmissing_results\x18\x03 \x01(\x08\x12\x1e\n\x08metadata\x18\x04 \x01(\x0b\x32\x0c.RpcMetadata\"f\n\x17\x43reateStatementResponse\x12\x15\n\rconnection_id\x18\x01 \x01(\t\x12\x14\n\x0cstatement_id\x18\x02 \x01(\r\x12\x1e\n\x08metadata\x18\x03 \x01(\x0b\x32\x0c.RpcMetadata\"8\n\x16\x43loseStatementResponse\x12\x1e\n\x08metadata\x18\x01 \x01(\x0b\x32\x0c.RpcMetadata\"8\n\x16OpenConnectionResponse\x12\x1e\n\x08metadata\x18\x01 \x01(\x0b\x32\x0c.RpcMetadata\"9\n\x17\x43loseConnectionResponse\x12\x1e\n\x08metadata\x18\x01 \x01(\x0b\x32\x0c.RpcMetadata\"c\n\x16\x43onnectionSyncResponse\x12)\n\nconn_props\x18\x01 \x01(\x0b\x32\x15.ConnectionProperties\x12\x1e\n\x08metadata\x18\x02 \x01(\x0b\x32\x0c.RpcMetadata\"u\n\x17\x44\x61tabasePropertyElement\x12\x1e\n\x03key\x18\x01 \x01(\x0b\x32\x11.DatabaseProperty\x12\x1a\n\x05value\x18\x02 \x01(\x0b\x32\x0b.TypedValue\x12\x1e\n\x08metadata\x18\x03 \x01(\x0b\x32\x0c.RpcMetadata\"c\n\x18\x44\x61tabasePropertyResponse\x12\'\n\x05props\x18\x01 \x03(\x0b\x32\x18.DatabasePropertyElement\x12\x1e\n\x08metadata\x18\x02 
\x01(\x0b\x32\x0c.RpcMetadata\"\xb6\x01\n\rErrorResponse\x12\x12\n\nexceptions\x18\x01 \x03(\t\x12\x16\n\x0ehas_exceptions\x18\x07 \x01(\x08\x12\x15\n\rerror_message\x18\x02 \x01(\t\x12\x1b\n\x08severity\x18\x03 \x01(\x0e\x32\t.Severity\x12\x12\n\nerror_code\x18\x04 \x01(\r\x12\x11\n\tsql_state\x18\x05 \x01(\t\x12\x1e\n\x08metadata\x18\x06 \x01(\x0b\x32\x0c.RpcMetadata\"f\n\x13SyncResultsResponse\x12\x19\n\x11missing_statement\x18\x01 \x01(\x08\x12\x14\n\x0cmore_results\x18\x02 \x01(\x08\x12\x1e\n\x08metadata\x18\x03 \x01(\x0b\x32\x0c.RpcMetadata\"%\n\x0bRpcMetadata\x12\x16\n\x0eserver_address\x18\x01 \x01(\t\"\x10\n\x0e\x43ommitResponse\"\x12\n\x10RollbackResponse\"\x95\x01\n\x14\x45xecuteBatchResponse\x12\x15\n\rconnection_id\x18\x01 \x01(\t\x12\x14\n\x0cstatement_id\x18\x02 \x01(\r\x12\x15\n\rupdate_counts\x18\x03 \x03(\x04\x12\x19\n\x11missing_statement\x18\x04 \x01(\x08\x12\x1e\n\x08metadata\x18\x05 \x01(\x0b\x32\x0c.RpcMetadataB\"\n org.apache.calcite.avatica.protob\x06proto3')
24 | ,
25 | dependencies=[common__pb2.DESCRIPTOR,])
26 | _sym_db.RegisterFileDescriptor(DESCRIPTOR)
27 |
28 |
29 |
30 |
31 | _RESULTSETRESPONSE = _descriptor.Descriptor(
32 | name='ResultSetResponse',
33 | full_name='ResultSetResponse',
34 | filename=None,
35 | file=DESCRIPTOR,
36 | containing_type=None,
37 | fields=[
38 | _descriptor.FieldDescriptor(
39 | name='connection_id', full_name='ResultSetResponse.connection_id', index=0,
40 | number=1, type=9, cpp_type=9, label=1,
41 | has_default_value=False, default_value=_b("").decode('utf-8'),
42 | message_type=None, enum_type=None, containing_type=None,
43 | is_extension=False, extension_scope=None,
44 | options=None),
45 | _descriptor.FieldDescriptor(
46 | name='statement_id', full_name='ResultSetResponse.statement_id', index=1,
47 | number=2, type=13, cpp_type=3, label=1,
48 | has_default_value=False, default_value=0,
49 | message_type=None, enum_type=None, containing_type=None,
50 | is_extension=False, extension_scope=None,
51 | options=None),
52 | _descriptor.FieldDescriptor(
53 | name='own_statement', full_name='ResultSetResponse.own_statement', index=2,
54 | number=3, type=8, cpp_type=7, label=1,
55 | has_default_value=False, default_value=False,
56 | message_type=None, enum_type=None, containing_type=None,
57 | is_extension=False, extension_scope=None,
58 | options=None),
59 | _descriptor.FieldDescriptor(
60 | name='signature', full_name='ResultSetResponse.signature', index=3,
61 | number=4, type=11, cpp_type=10, label=1,
62 | has_default_value=False, default_value=None,
63 | message_type=None, enum_type=None, containing_type=None,
64 | is_extension=False, extension_scope=None,
65 | options=None),
66 | _descriptor.FieldDescriptor(
67 | name='first_frame', full_name='ResultSetResponse.first_frame', index=4,
68 | number=5, type=11, cpp_type=10, label=1,
69 | has_default_value=False, default_value=None,
70 | message_type=None, enum_type=None, containing_type=None,
71 | is_extension=False, extension_scope=None,
72 | options=None),
73 | _descriptor.FieldDescriptor(
74 | name='update_count', full_name='ResultSetResponse.update_count', index=5,
75 | number=6, type=4, cpp_type=4, label=1,
76 | has_default_value=False, default_value=0,
77 | message_type=None, enum_type=None, containing_type=None,
78 | is_extension=False, extension_scope=None,
79 | options=None),
80 | _descriptor.FieldDescriptor(
81 | name='metadata', full_name='ResultSetResponse.metadata', index=6,
82 | number=7, type=11, cpp_type=10, label=1,
83 | has_default_value=False, default_value=None,
84 | message_type=None, enum_type=None, containing_type=None,
85 | is_extension=False, extension_scope=None,
86 | options=None),
87 | ],
88 | extensions=[
89 | ],
90 | nested_types=[],
91 | enum_types=[
92 | ],
93 | options=None,
94 | is_extendable=False,
95 | syntax='proto3',
96 | extension_ranges=[],
97 | oneofs=[
98 | ],
99 | serialized_start=34,
100 | serialized_end=235,
101 | )
102 |
103 |
104 | _EXECUTERESPONSE = _descriptor.Descriptor(
105 | name='ExecuteResponse',
106 | full_name='ExecuteResponse',
107 | filename=None,
108 | file=DESCRIPTOR,
109 | containing_type=None,
110 | fields=[
111 | _descriptor.FieldDescriptor(
112 | name='results', full_name='ExecuteResponse.results', index=0,
113 | number=1, type=11, cpp_type=10, label=3,
114 | has_default_value=False, default_value=[],
115 | message_type=None, enum_type=None, containing_type=None,
116 | is_extension=False, extension_scope=None,
117 | options=None),
118 | _descriptor.FieldDescriptor(
119 | name='missing_statement', full_name='ExecuteResponse.missing_statement', index=1,
120 | number=2, type=8, cpp_type=7, label=1,
121 | has_default_value=False, default_value=False,
122 | message_type=None, enum_type=None, containing_type=None,
123 | is_extension=False, extension_scope=None,
124 | options=None),
125 | _descriptor.FieldDescriptor(
126 | name='metadata', full_name='ExecuteResponse.metadata', index=2,
127 | number=3, type=11, cpp_type=10, label=1,
128 | has_default_value=False, default_value=None,
129 | message_type=None, enum_type=None, containing_type=None,
130 | is_extension=False, extension_scope=None,
131 | options=None),
132 | ],
133 | extensions=[
134 | ],
135 | nested_types=[],
136 | enum_types=[
137 | ],
138 | options=None,
139 | is_extendable=False,
140 | syntax='proto3',
141 | extension_ranges=[],
142 | oneofs=[
143 | ],
144 | serialized_start=237,
145 | serialized_end=350,
146 | )
147 |
148 |
149 | _PREPARERESPONSE = _descriptor.Descriptor(
150 | name='PrepareResponse',
151 | full_name='PrepareResponse',
152 | filename=None,
153 | file=DESCRIPTOR,
154 | containing_type=None,
155 | fields=[
156 | _descriptor.FieldDescriptor(
157 | name='statement', full_name='PrepareResponse.statement', index=0,
158 | number=1, type=11, cpp_type=10, label=1,
159 | has_default_value=False, default_value=None,
160 | message_type=None, enum_type=None, containing_type=None,
161 | is_extension=False, extension_scope=None,
162 | options=None),
163 | _descriptor.FieldDescriptor(
164 | name='metadata', full_name='PrepareResponse.metadata', index=1,
165 | number=2, type=11, cpp_type=10, label=1,
166 | has_default_value=False, default_value=None,
167 | message_type=None, enum_type=None, containing_type=None,
168 | is_extension=False, extension_scope=None,
169 | options=None),
170 | ],
171 | extensions=[
172 | ],
173 | nested_types=[],
174 | enum_types=[
175 | ],
176 | options=None,
177 | is_extendable=False,
178 | syntax='proto3',
179 | extension_ranges=[],
180 | oneofs=[
181 | ],
182 | serialized_start=352,
183 | serialized_end=438,
184 | )
185 |
186 |
187 | _FETCHRESPONSE = _descriptor.Descriptor(
188 | name='FetchResponse',
189 | full_name='FetchResponse',
190 | filename=None,
191 | file=DESCRIPTOR,
192 | containing_type=None,
193 | fields=[
194 | _descriptor.FieldDescriptor(
195 | name='frame', full_name='FetchResponse.frame', index=0,
196 | number=1, type=11, cpp_type=10, label=1,
197 | has_default_value=False, default_value=None,
198 | message_type=None, enum_type=None, containing_type=None,
199 | is_extension=False, extension_scope=None,
200 | options=None),
201 | _descriptor.FieldDescriptor(
202 | name='missing_statement', full_name='FetchResponse.missing_statement', index=1,
203 | number=2, type=8, cpp_type=7, label=1,
204 | has_default_value=False, default_value=False,
205 | message_type=None, enum_type=None, containing_type=None,
206 | is_extension=False, extension_scope=None,
207 | options=None),
208 | _descriptor.FieldDescriptor(
209 | name='missing_results', full_name='FetchResponse.missing_results', index=2,
210 | number=3, type=8, cpp_type=7, label=1,
211 | has_default_value=False, default_value=False,
212 | message_type=None, enum_type=None, containing_type=None,
213 | is_extension=False, extension_scope=None,
214 | options=None),
215 | _descriptor.FieldDescriptor(
216 | name='metadata', full_name='FetchResponse.metadata', index=3,
217 | number=4, type=11, cpp_type=10, label=1,
218 | has_default_value=False, default_value=None,
219 | message_type=None, enum_type=None, containing_type=None,
220 | is_extension=False, extension_scope=None,
221 | options=None),
222 | ],
223 | extensions=[
224 | ],
225 | nested_types=[],
226 | enum_types=[
227 | ],
228 | options=None,
229 | is_extendable=False,
230 | syntax='proto3',
231 | extension_ranges=[],
232 | oneofs=[
233 | ],
234 | serialized_start=440,
235 | serialized_end=562,
236 | )
237 |
238 |
239 | _CREATESTATEMENTRESPONSE = _descriptor.Descriptor(
240 | name='CreateStatementResponse',
241 | full_name='CreateStatementResponse',
242 | filename=None,
243 | file=DESCRIPTOR,
244 | containing_type=None,
245 | fields=[
246 | _descriptor.FieldDescriptor(
247 | name='connection_id', full_name='CreateStatementResponse.connection_id', index=0,
248 | number=1, type=9, cpp_type=9, label=1,
249 | has_default_value=False, default_value=_b("").decode('utf-8'),
250 | message_type=None, enum_type=None, containing_type=None,
251 | is_extension=False, extension_scope=None,
252 | options=None),
253 | _descriptor.FieldDescriptor(
254 | name='statement_id', full_name='CreateStatementResponse.statement_id', index=1,
255 | number=2, type=13, cpp_type=3, label=1,
256 | has_default_value=False, default_value=0,
257 | message_type=None, enum_type=None, containing_type=None,
258 | is_extension=False, extension_scope=None,
259 | options=None),
260 | _descriptor.FieldDescriptor(
261 | name='metadata', full_name='CreateStatementResponse.metadata', index=2,
262 | number=3, type=11, cpp_type=10, label=1,
263 | has_default_value=False, default_value=None,
264 | message_type=None, enum_type=None, containing_type=None,
265 | is_extension=False, extension_scope=None,
266 | options=None),
267 | ],
268 | extensions=[
269 | ],
270 | nested_types=[],
271 | enum_types=[
272 | ],
273 | options=None,
274 | is_extendable=False,
275 | syntax='proto3',
276 | extension_ranges=[],
277 | oneofs=[
278 | ],
279 | serialized_start=564,
280 | serialized_end=666,
281 | )
282 |
283 |
284 | _CLOSESTATEMENTRESPONSE = _descriptor.Descriptor(
285 | name='CloseStatementResponse',
286 | full_name='CloseStatementResponse',
287 | filename=None,
288 | file=DESCRIPTOR,
289 | containing_type=None,
290 | fields=[
291 | _descriptor.FieldDescriptor(
292 | name='metadata', full_name='CloseStatementResponse.metadata', index=0,
293 | number=1, type=11, cpp_type=10, label=1,
294 | has_default_value=False, default_value=None,
295 | message_type=None, enum_type=None, containing_type=None,
296 | is_extension=False, extension_scope=None,
297 | options=None),
298 | ],
299 | extensions=[
300 | ],
301 | nested_types=[],
302 | enum_types=[
303 | ],
304 | options=None,
305 | is_extendable=False,
306 | syntax='proto3',
307 | extension_ranges=[],
308 | oneofs=[
309 | ],
310 | serialized_start=668,
311 | serialized_end=724,
312 | )
313 |
314 |
315 | _OPENCONNECTIONRESPONSE = _descriptor.Descriptor(
316 | name='OpenConnectionResponse',
317 | full_name='OpenConnectionResponse',
318 | filename=None,
319 | file=DESCRIPTOR,
320 | containing_type=None,
321 | fields=[
322 | _descriptor.FieldDescriptor(
323 | name='metadata', full_name='OpenConnectionResponse.metadata', index=0,
324 | number=1, type=11, cpp_type=10, label=1,
325 | has_default_value=False, default_value=None,
326 | message_type=None, enum_type=None, containing_type=None,
327 | is_extension=False, extension_scope=None,
328 | options=None),
329 | ],
330 | extensions=[
331 | ],
332 | nested_types=[],
333 | enum_types=[
334 | ],
335 | options=None,
336 | is_extendable=False,
337 | syntax='proto3',
338 | extension_ranges=[],
339 | oneofs=[
340 | ],
341 | serialized_start=726,
342 | serialized_end=782,
343 | )
344 |
345 |
346 | _CLOSECONNECTIONRESPONSE = _descriptor.Descriptor(
347 | name='CloseConnectionResponse',
348 | full_name='CloseConnectionResponse',
349 | filename=None,
350 | file=DESCRIPTOR,
351 | containing_type=None,
352 | fields=[
353 | _descriptor.FieldDescriptor(
354 | name='metadata', full_name='CloseConnectionResponse.metadata', index=0,
355 | number=1, type=11, cpp_type=10, label=1,
356 | has_default_value=False, default_value=None,
357 | message_type=None, enum_type=None, containing_type=None,
358 | is_extension=False, extension_scope=None,
359 | options=None),
360 | ],
361 | extensions=[
362 | ],
363 | nested_types=[],
364 | enum_types=[
365 | ],
366 | options=None,
367 | is_extendable=False,
368 | syntax='proto3',
369 | extension_ranges=[],
370 | oneofs=[
371 | ],
372 | serialized_start=784,
373 | serialized_end=841,
374 | )
375 |
376 |
377 | _CONNECTIONSYNCRESPONSE = _descriptor.Descriptor(
378 | name='ConnectionSyncResponse',
379 | full_name='ConnectionSyncResponse',
380 | filename=None,
381 | file=DESCRIPTOR,
382 | containing_type=None,
383 | fields=[
384 | _descriptor.FieldDescriptor(
385 | name='conn_props', full_name='ConnectionSyncResponse.conn_props', index=0,
386 | number=1, type=11, cpp_type=10, label=1,
387 | has_default_value=False, default_value=None,
388 | message_type=None, enum_type=None, containing_type=None,
389 | is_extension=False, extension_scope=None,
390 | options=None),
391 | _descriptor.FieldDescriptor(
392 | name='metadata', full_name='ConnectionSyncResponse.metadata', index=1,
393 | number=2, type=11, cpp_type=10, label=1,
394 | has_default_value=False, default_value=None,
395 | message_type=None, enum_type=None, containing_type=None,
396 | is_extension=False, extension_scope=None,
397 | options=None),
398 | ],
399 | extensions=[
400 | ],
401 | nested_types=[],
402 | enum_types=[
403 | ],
404 | options=None,
405 | is_extendable=False,
406 | syntax='proto3',
407 | extension_ranges=[],
408 | oneofs=[
409 | ],
410 | serialized_start=843,
411 | serialized_end=942,
412 | )
413 |
414 |
415 | _DATABASEPROPERTYELEMENT = _descriptor.Descriptor(
416 | name='DatabasePropertyElement',
417 | full_name='DatabasePropertyElement',
418 | filename=None,
419 | file=DESCRIPTOR,
420 | containing_type=None,
421 | fields=[
422 | _descriptor.FieldDescriptor(
423 | name='key', full_name='DatabasePropertyElement.key', index=0,
424 | number=1, type=11, cpp_type=10, label=1,
425 | has_default_value=False, default_value=None,
426 | message_type=None, enum_type=None, containing_type=None,
427 | is_extension=False, extension_scope=None,
428 | options=None),
429 | _descriptor.FieldDescriptor(
430 | name='value', full_name='DatabasePropertyElement.value', index=1,
431 | number=2, type=11, cpp_type=10, label=1,
432 | has_default_value=False, default_value=None,
433 | message_type=None, enum_type=None, containing_type=None,
434 | is_extension=False, extension_scope=None,
435 | options=None),
436 | _descriptor.FieldDescriptor(
437 | name='metadata', full_name='DatabasePropertyElement.metadata', index=2,
438 | number=3, type=11, cpp_type=10, label=1,
439 | has_default_value=False, default_value=None,
440 | message_type=None, enum_type=None, containing_type=None,
441 | is_extension=False, extension_scope=None,
442 | options=None),
443 | ],
444 | extensions=[
445 | ],
446 | nested_types=[],
447 | enum_types=[
448 | ],
449 | options=None,
450 | is_extendable=False,
451 | syntax='proto3',
452 | extension_ranges=[],
453 | oneofs=[
454 | ],
455 | serialized_start=944,
456 | serialized_end=1061,
457 | )
458 |
459 |
460 | _DATABASEPROPERTYRESPONSE = _descriptor.Descriptor(
461 | name='DatabasePropertyResponse',
462 | full_name='DatabasePropertyResponse',
463 | filename=None,
464 | file=DESCRIPTOR,
465 | containing_type=None,
466 | fields=[
467 | _descriptor.FieldDescriptor(
468 | name='props', full_name='DatabasePropertyResponse.props', index=0,
469 | number=1, type=11, cpp_type=10, label=3,
470 | has_default_value=False, default_value=[],
471 | message_type=None, enum_type=None, containing_type=None,
472 | is_extension=False, extension_scope=None,
473 | options=None),
474 | _descriptor.FieldDescriptor(
475 | name='metadata', full_name='DatabasePropertyResponse.metadata', index=1,
476 | number=2, type=11, cpp_type=10, label=1,
477 | has_default_value=False, default_value=None,
478 | message_type=None, enum_type=None, containing_type=None,
479 | is_extension=False, extension_scope=None,
480 | options=None),
481 | ],
482 | extensions=[
483 | ],
484 | nested_types=[],
485 | enum_types=[
486 | ],
487 | options=None,
488 | is_extendable=False,
489 | syntax='proto3',
490 | extension_ranges=[],
491 | oneofs=[
492 | ],
493 | serialized_start=1063,
494 | serialized_end=1162,
495 | )
496 |
497 |
498 | _ERRORRESPONSE = _descriptor.Descriptor(
499 | name='ErrorResponse',
500 | full_name='ErrorResponse',
501 | filename=None,
502 | file=DESCRIPTOR,
503 | containing_type=None,
504 | fields=[
505 | _descriptor.FieldDescriptor(
506 | name='exceptions', full_name='ErrorResponse.exceptions', index=0,
507 | number=1, type=9, cpp_type=9, label=3,
508 | has_default_value=False, default_value=[],
509 | message_type=None, enum_type=None, containing_type=None,
510 | is_extension=False, extension_scope=None,
511 | options=None),
512 | _descriptor.FieldDescriptor(
513 | name='has_exceptions', full_name='ErrorResponse.has_exceptions', index=1,
514 | number=7, type=8, cpp_type=7, label=1,
515 | has_default_value=False, default_value=False,
516 | message_type=None, enum_type=None, containing_type=None,
517 | is_extension=False, extension_scope=None,
518 | options=None),
519 | _descriptor.FieldDescriptor(
520 | name='error_message', full_name='ErrorResponse.error_message', index=2,
521 | number=2, type=9, cpp_type=9, label=1,
522 | has_default_value=False, default_value=_b("").decode('utf-8'),
523 | message_type=None, enum_type=None, containing_type=None,
524 | is_extension=False, extension_scope=None,
525 | options=None),
526 | _descriptor.FieldDescriptor(
527 | name='severity', full_name='ErrorResponse.severity', index=3,
528 | number=3, type=14, cpp_type=8, label=1,
529 | has_default_value=False, default_value=0,
530 | message_type=None, enum_type=None, containing_type=None,
531 | is_extension=False, extension_scope=None,
532 | options=None),
533 | _descriptor.FieldDescriptor(
534 | name='error_code', full_name='ErrorResponse.error_code', index=4,
535 | number=4, type=13, cpp_type=3, label=1,
536 | has_default_value=False, default_value=0,
537 | message_type=None, enum_type=None, containing_type=None,
538 | is_extension=False, extension_scope=None,
539 | options=None),
540 | _descriptor.FieldDescriptor(
541 | name='sql_state', full_name='ErrorResponse.sql_state', index=5,
542 | number=5, type=9, cpp_type=9, label=1,
543 | has_default_value=False, default_value=_b("").decode('utf-8'),
544 | message_type=None, enum_type=None, containing_type=None,
545 | is_extension=False, extension_scope=None,
546 | options=None),
547 | _descriptor.FieldDescriptor(
548 | name='metadata', full_name='ErrorResponse.metadata', index=6,
549 | number=6, type=11, cpp_type=10, label=1,
550 | has_default_value=False, default_value=None,
551 | message_type=None, enum_type=None, containing_type=None,
552 | is_extension=False, extension_scope=None,
553 | options=None),
554 | ],
555 | extensions=[
556 | ],
557 | nested_types=[],
558 | enum_types=[
559 | ],
560 | options=None,
561 | is_extendable=False,
562 | syntax='proto3',
563 | extension_ranges=[],
564 | oneofs=[
565 | ],
566 | serialized_start=1165,
567 | serialized_end=1347,
568 | )
569 |
570 |
571 | _SYNCRESULTSRESPONSE = _descriptor.Descriptor(
572 | name='SyncResultsResponse',
573 | full_name='SyncResultsResponse',
574 | filename=None,
575 | file=DESCRIPTOR,
576 | containing_type=None,
577 | fields=[
578 | _descriptor.FieldDescriptor(
579 | name='missing_statement', full_name='SyncResultsResponse.missing_statement', index=0,
580 | number=1, type=8, cpp_type=7, label=1,
581 | has_default_value=False, default_value=False,
582 | message_type=None, enum_type=None, containing_type=None,
583 | is_extension=False, extension_scope=None,
584 | options=None),
585 | _descriptor.FieldDescriptor(
586 | name='more_results', full_name='SyncResultsResponse.more_results', index=1,
587 | number=2, type=8, cpp_type=7, label=1,
588 | has_default_value=False, default_value=False,
589 | message_type=None, enum_type=None, containing_type=None,
590 | is_extension=False, extension_scope=None,
591 | options=None),
592 | _descriptor.FieldDescriptor(
593 | name='metadata', full_name='SyncResultsResponse.metadata', index=2,
594 | number=3, type=11, cpp_type=10, label=1,
595 | has_default_value=False, default_value=None,
596 | message_type=None, enum_type=None, containing_type=None,
597 | is_extension=False, extension_scope=None,
598 | options=None),
599 | ],
600 | extensions=[
601 | ],
602 | nested_types=[],
603 | enum_types=[
604 | ],
605 | options=None,
606 | is_extendable=False,
607 | syntax='proto3',
608 | extension_ranges=[],
609 | oneofs=[
610 | ],
611 | serialized_start=1349,
612 | serialized_end=1451,
613 | )
614 |
615 |
616 | _RPCMETADATA = _descriptor.Descriptor(
617 | name='RpcMetadata',
618 | full_name='RpcMetadata',
619 | filename=None,
620 | file=DESCRIPTOR,
621 | containing_type=None,
622 | fields=[
623 | _descriptor.FieldDescriptor(
624 | name='server_address', full_name='RpcMetadata.server_address', index=0,
625 | number=1, type=9, cpp_type=9, label=1,
626 | has_default_value=False, default_value=_b("").decode('utf-8'),
627 | message_type=None, enum_type=None, containing_type=None,
628 | is_extension=False, extension_scope=None,
629 | options=None),
630 | ],
631 | extensions=[
632 | ],
633 | nested_types=[],
634 | enum_types=[
635 | ],
636 | options=None,
637 | is_extendable=False,
638 | syntax='proto3',
639 | extension_ranges=[],
640 | oneofs=[
641 | ],
642 | serialized_start=1453,
643 | serialized_end=1490,
644 | )
645 |
646 |
647 | _COMMITRESPONSE = _descriptor.Descriptor(
648 | name='CommitResponse',
649 | full_name='CommitResponse',
650 | filename=None,
651 | file=DESCRIPTOR,
652 | containing_type=None,
653 | fields=[
654 | ],
655 | extensions=[
656 | ],
657 | nested_types=[],
658 | enum_types=[
659 | ],
660 | options=None,
661 | is_extendable=False,
662 | syntax='proto3',
663 | extension_ranges=[],
664 | oneofs=[
665 | ],
666 | serialized_start=1492,
667 | serialized_end=1508,
668 | )
669 |
670 |
671 | _ROLLBACKRESPONSE = _descriptor.Descriptor(
672 | name='RollbackResponse',
673 | full_name='RollbackResponse',
674 | filename=None,
675 | file=DESCRIPTOR,
676 | containing_type=None,
677 | fields=[
678 | ],
679 | extensions=[
680 | ],
681 | nested_types=[],
682 | enum_types=[
683 | ],
684 | options=None,
685 | is_extendable=False,
686 | syntax='proto3',
687 | extension_ranges=[],
688 | oneofs=[
689 | ],
690 | serialized_start=1510,
691 | serialized_end=1528,
692 | )
693 |
694 |
# Descriptor for ExecuteBatchResponse: connection/statement ids, one update
# count per batch entry, a missing-statement flag and the RPC metadata.
# NOTE: generated by protoc from Avatica's responses.proto -- do not edit by hand.
_EXECUTEBATCHRESPONSE = _descriptor.Descriptor(
  name='ExecuteBatchResponse',
  full_name='ExecuteBatchResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='connection_id', full_name='ExecuteBatchResponse.connection_id', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='statement_id', full_name='ExecuteBatchResponse.statement_id', index=1,
      number=2, type=13, cpp_type=3, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='update_counts', full_name='ExecuteBatchResponse.update_counts', index=2,
      number=3, type=4, cpp_type=4, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='missing_statement', full_name='ExecuteBatchResponse.missing_statement', index=3,
      number=4, type=8, cpp_type=7, label=1,
      has_default_value=False, default_value=False,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='metadata', full_name='ExecuteBatchResponse.metadata', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=1531,
  serialized_end=1680,
)
752 |
# Resolve cross-message references (message/enum field types, including types
# imported from common.proto via common__pb2) and register every message
# descriptor with the file DESCRIPTOR.
# NOTE: generated by protoc -- do not edit by hand.
_RESULTSETRESPONSE.fields_by_name['signature'].message_type = common__pb2._SIGNATURE
_RESULTSETRESPONSE.fields_by_name['first_frame'].message_type = common__pb2._FRAME
_RESULTSETRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA
_EXECUTERESPONSE.fields_by_name['results'].message_type = _RESULTSETRESPONSE
_EXECUTERESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA
_PREPARERESPONSE.fields_by_name['statement'].message_type = common__pb2._STATEMENTHANDLE
_PREPARERESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA
_FETCHRESPONSE.fields_by_name['frame'].message_type = common__pb2._FRAME
_FETCHRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA
_CREATESTATEMENTRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA
_CLOSESTATEMENTRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA
_OPENCONNECTIONRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA
_CLOSECONNECTIONRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA
_CONNECTIONSYNCRESPONSE.fields_by_name['conn_props'].message_type = common__pb2._CONNECTIONPROPERTIES
_CONNECTIONSYNCRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA
_DATABASEPROPERTYELEMENT.fields_by_name['key'].message_type = common__pb2._DATABASEPROPERTY
_DATABASEPROPERTYELEMENT.fields_by_name['value'].message_type = common__pb2._TYPEDVALUE
_DATABASEPROPERTYELEMENT.fields_by_name['metadata'].message_type = _RPCMETADATA
_DATABASEPROPERTYRESPONSE.fields_by_name['props'].message_type = _DATABASEPROPERTYELEMENT
_DATABASEPROPERTYRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA
_ERRORRESPONSE.fields_by_name['severity'].enum_type = common__pb2._SEVERITY
_ERRORRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA
_SYNCRESULTSRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA
_EXECUTEBATCHRESPONSE.fields_by_name['metadata'].message_type = _RPCMETADATA
DESCRIPTOR.message_types_by_name['ResultSetResponse'] = _RESULTSETRESPONSE
DESCRIPTOR.message_types_by_name['ExecuteResponse'] = _EXECUTERESPONSE
DESCRIPTOR.message_types_by_name['PrepareResponse'] = _PREPARERESPONSE
DESCRIPTOR.message_types_by_name['FetchResponse'] = _FETCHRESPONSE
DESCRIPTOR.message_types_by_name['CreateStatementResponse'] = _CREATESTATEMENTRESPONSE
DESCRIPTOR.message_types_by_name['CloseStatementResponse'] = _CLOSESTATEMENTRESPONSE
DESCRIPTOR.message_types_by_name['OpenConnectionResponse'] = _OPENCONNECTIONRESPONSE
DESCRIPTOR.message_types_by_name['CloseConnectionResponse'] = _CLOSECONNECTIONRESPONSE
DESCRIPTOR.message_types_by_name['ConnectionSyncResponse'] = _CONNECTIONSYNCRESPONSE
DESCRIPTOR.message_types_by_name['DatabasePropertyElement'] = _DATABASEPROPERTYELEMENT
DESCRIPTOR.message_types_by_name['DatabasePropertyResponse'] = _DATABASEPROPERTYRESPONSE
DESCRIPTOR.message_types_by_name['ErrorResponse'] = _ERRORRESPONSE
DESCRIPTOR.message_types_by_name['SyncResultsResponse'] = _SYNCRESULTSRESPONSE
DESCRIPTOR.message_types_by_name['RpcMetadata'] = _RPCMETADATA
DESCRIPTOR.message_types_by_name['CommitResponse'] = _COMMITRESPONSE
DESCRIPTOR.message_types_by_name['RollbackResponse'] = _ROLLBACKRESPONSE
DESCRIPTOR.message_types_by_name['ExecuteBatchResponse'] = _EXECUTEBATCHRESPONSE
794 |
# Build the concrete message classes from the descriptors via the reflection
# metaclass and register each one with the default symbol database, then
# attach the file-level options (the Java package name for these protos).
# NOTE: generated by protoc -- do not edit by hand.
ResultSetResponse = _reflection.GeneratedProtocolMessageType('ResultSetResponse', (_message.Message,), dict(
  DESCRIPTOR = _RESULTSETRESPONSE,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:ResultSetResponse)
  ))
_sym_db.RegisterMessage(ResultSetResponse)

ExecuteResponse = _reflection.GeneratedProtocolMessageType('ExecuteResponse', (_message.Message,), dict(
  DESCRIPTOR = _EXECUTERESPONSE,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:ExecuteResponse)
  ))
_sym_db.RegisterMessage(ExecuteResponse)

PrepareResponse = _reflection.GeneratedProtocolMessageType('PrepareResponse', (_message.Message,), dict(
  DESCRIPTOR = _PREPARERESPONSE,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:PrepareResponse)
  ))
_sym_db.RegisterMessage(PrepareResponse)

FetchResponse = _reflection.GeneratedProtocolMessageType('FetchResponse', (_message.Message,), dict(
  DESCRIPTOR = _FETCHRESPONSE,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:FetchResponse)
  ))
_sym_db.RegisterMessage(FetchResponse)

CreateStatementResponse = _reflection.GeneratedProtocolMessageType('CreateStatementResponse', (_message.Message,), dict(
  DESCRIPTOR = _CREATESTATEMENTRESPONSE,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:CreateStatementResponse)
  ))
_sym_db.RegisterMessage(CreateStatementResponse)

CloseStatementResponse = _reflection.GeneratedProtocolMessageType('CloseStatementResponse', (_message.Message,), dict(
  DESCRIPTOR = _CLOSESTATEMENTRESPONSE,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:CloseStatementResponse)
  ))
_sym_db.RegisterMessage(CloseStatementResponse)

OpenConnectionResponse = _reflection.GeneratedProtocolMessageType('OpenConnectionResponse', (_message.Message,), dict(
  DESCRIPTOR = _OPENCONNECTIONRESPONSE,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:OpenConnectionResponse)
  ))
_sym_db.RegisterMessage(OpenConnectionResponse)

CloseConnectionResponse = _reflection.GeneratedProtocolMessageType('CloseConnectionResponse', (_message.Message,), dict(
  DESCRIPTOR = _CLOSECONNECTIONRESPONSE,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:CloseConnectionResponse)
  ))
_sym_db.RegisterMessage(CloseConnectionResponse)

ConnectionSyncResponse = _reflection.GeneratedProtocolMessageType('ConnectionSyncResponse', (_message.Message,), dict(
  DESCRIPTOR = _CONNECTIONSYNCRESPONSE,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:ConnectionSyncResponse)
  ))
_sym_db.RegisterMessage(ConnectionSyncResponse)

DatabasePropertyElement = _reflection.GeneratedProtocolMessageType('DatabasePropertyElement', (_message.Message,), dict(
  DESCRIPTOR = _DATABASEPROPERTYELEMENT,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:DatabasePropertyElement)
  ))
_sym_db.RegisterMessage(DatabasePropertyElement)

DatabasePropertyResponse = _reflection.GeneratedProtocolMessageType('DatabasePropertyResponse', (_message.Message,), dict(
  DESCRIPTOR = _DATABASEPROPERTYRESPONSE,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:DatabasePropertyResponse)
  ))
_sym_db.RegisterMessage(DatabasePropertyResponse)

ErrorResponse = _reflection.GeneratedProtocolMessageType('ErrorResponse', (_message.Message,), dict(
  DESCRIPTOR = _ERRORRESPONSE,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:ErrorResponse)
  ))
_sym_db.RegisterMessage(ErrorResponse)

SyncResultsResponse = _reflection.GeneratedProtocolMessageType('SyncResultsResponse', (_message.Message,), dict(
  DESCRIPTOR = _SYNCRESULTSRESPONSE,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:SyncResultsResponse)
  ))
_sym_db.RegisterMessage(SyncResultsResponse)

RpcMetadata = _reflection.GeneratedProtocolMessageType('RpcMetadata', (_message.Message,), dict(
  DESCRIPTOR = _RPCMETADATA,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:RpcMetadata)
  ))
_sym_db.RegisterMessage(RpcMetadata)

CommitResponse = _reflection.GeneratedProtocolMessageType('CommitResponse', (_message.Message,), dict(
  DESCRIPTOR = _COMMITRESPONSE,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:CommitResponse)
  ))
_sym_db.RegisterMessage(CommitResponse)

RollbackResponse = _reflection.GeneratedProtocolMessageType('RollbackResponse', (_message.Message,), dict(
  DESCRIPTOR = _ROLLBACKRESPONSE,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:RollbackResponse)
  ))
_sym_db.RegisterMessage(RollbackResponse)

ExecuteBatchResponse = _reflection.GeneratedProtocolMessageType('ExecuteBatchResponse', (_message.Message,), dict(
  DESCRIPTOR = _EXECUTEBATCHRESPONSE,
  __module__ = 'responses_pb2'
  # @@protoc_insertion_point(class_scope:ExecuteBatchResponse)
  ))
_sym_db.RegisterMessage(ExecuteBatchResponse)


DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n org.apache.calcite.avatica.proto'))
# @@protoc_insertion_point(module_scope)
918 |
--------------------------------------------------------------------------------
/phoenixdb/connection.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Lukas Lalinsky
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import logging
16 | import uuid
17 | import weakref
18 | from phoenixdb import errors
19 | from phoenixdb.avatica.client import OPEN_CONNECTION_PROPERTIES
20 | from phoenixdb.cursor import Cursor
21 | from phoenixdb.errors import ProgrammingError
22 |
23 | __all__ = ['Connection']
24 |
25 | logger = logging.getLogger(__name__)
26 |
27 |
class Connection(object):
    """Database connection.

    You should not construct this object manually, use :func:`~phoenixdb.connect` instead.
    """

    cursor_factory = None
    """
    The default cursor factory used by :meth:`cursor` if the parameter is not specified.
    """

    def __init__(self, client, cursor_factory=None, **kwargs):
        self._client = client
        self._closed = False
        self.cursor_factory = cursor_factory if cursor_factory is not None else Cursor
        self._cursors = []
        # Partition the keyword arguments: keys understood by the Avatica
        # OpenConnectionRequest travel with open(); everything else is
        # forwarded to set_session().
        self._connection_args = {}
        self._filtered_args = {}
        for key, value in kwargs.items():
            if key in OPEN_CONNECTION_PROPERTIES:
                self._connection_args[key] = value
            else:
                self._filtered_args[key] = value
        self.open()
        self.set_session(**self._filtered_args)

    def __del__(self):
        # Best-effort close when the object is garbage collected.
        if not self._closed:
            self.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if not self._closed:
            self.close()

    def open(self):
        """Opens the connection."""
        # A freshly generated UUID identifies this connection on the server.
        self._id = str(uuid.uuid4())
        self._client.open_connection(self._id, info=self._connection_args)

    def close(self):
        """Closes the connection.
        No further operations are allowed, either on the connection or any
        of its cursors, once the connection is closed.

        If the connection is used in a ``with`` statement, this method will
        be automatically called at the end of the ``with`` block.
        """
        if self._closed:
            raise ProgrammingError('the connection is already closed')
        # Close every cursor that is still alive before tearing down the
        # server-side connection.
        for cursor_ref in self._cursors:
            live_cursor = cursor_ref()
            if live_cursor is not None and not live_cursor._closed:
                live_cursor.close()
        self._client.close_connection(self._id)
        self._client.close()
        self._closed = True

    @property
    def closed(self):
        """Read-only attribute specifying if the connection is closed or not."""
        return self._closed

    def commit(self):
        """Commits pending database changes.

        Currently a no-op: the RPC layer offers no transaction support, so
        this exists purely for DB API 2.0 compatibility. Use
        :attr:`autocommit` mode instead.
        """
        # TODO can support be added for this?
        if self._closed:
            raise ProgrammingError('the connection is already closed')

    def cursor(self, cursor_factory=None):
        """Creates a new cursor.

        :param cursor_factory:
            Class used to instantiate the cursor; must be a subclass of
            :class:`~phoenixdb.cursor.Cursor` (for example
            :class:`~phoenixdb.cursor.DictCursor`). When omitted, the
            connection-level :attr:`cursor_factory` attribute is used.

        :returns:
            A :class:`~phoenixdb.cursor.Cursor` object.
        """
        if self._closed:
            raise ProgrammingError('the connection is already closed')
        factory = cursor_factory or self.cursor_factory
        cursor = factory(self)
        # Track the cursor via a weak reference so close() can reach it
        # without keeping it alive; the callback prunes dead entries.
        self._cursors.append(weakref.ref(cursor, self._cursors.remove))
        return cursor

    def set_session(self, autocommit=None, readonly=None):
        """Sets one or more parameters in the current connection.

        :param autocommit:
            Switch the connection to autocommit mode. With the current
            version, you need to always enable this, because
            :meth:`commit` is not implemented.

        :param readonly:
            Switch the connection to read-only mode.
        """
        requested = {}
        if autocommit is not None:
            requested['autoCommit'] = bool(autocommit)
        if readonly is not None:
            requested['readOnly'] = bool(readonly)
        # The server replies with the full set of effective properties.
        applied = self._client.connection_sync(self._id, requested)
        self._autocommit = applied.auto_commit
        self._readonly = applied.read_only
        self._transactionisolation = applied.transaction_isolation

    @property
    def autocommit(self):
        """Read/write attribute for switching the connection's autocommit mode."""
        return self._autocommit

    @autocommit.setter
    def autocommit(self, value):
        if self._closed:
            raise ProgrammingError('the connection is already closed')
        applied = self._client.connection_sync(self._id, {'autoCommit': bool(value)})
        self._autocommit = applied.auto_commit

    @property
    def readonly(self):
        """Read/write attribute for switching the connection's readonly mode."""
        return self._readonly

    @readonly.setter
    def readonly(self, value):
        if self._closed:
            raise ProgrammingError('the connection is already closed')
        applied = self._client.connection_sync(self._id, {'readOnly': bool(value)})
        self._readonly = applied.read_only

    @property
    def transactionisolation(self):
        return self._transactionisolation

    @transactionisolation.setter
    def transactionisolation(self, value):
        if self._closed:
            raise ProgrammingError('the connection is already closed')
        # NOTE(review): the value is coerced with bool() even though JDBC
        # transaction isolation levels are small integers; this mirrors the
        # original behavior -- confirm before changing.
        applied = self._client.connection_sync(self._id, {'transactionIsolation': bool(value)})
        self._transactionisolation = applied.transaction_isolation
183 |
184 |
# DB API 2.0 optional extension: expose the module's exception classes as
# attributes of the Connection class, so code that only holds a connection
# object can still catch e.g. ``conn.ProgrammingError``.
for name in errors.__all__:
    setattr(Connection, name, getattr(errors, name))
187 |
--------------------------------------------------------------------------------
/phoenixdb/cursor.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Lukas Lalinsky
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import logging
16 | import collections
17 | from phoenixdb.types import TypeHelper
18 | from phoenixdb.errors import ProgrammingError, InternalError
19 | from phoenixdb.avatica.proto import common_pb2
20 |
21 | __all__ = ['Cursor', 'ColumnDescription', 'DictCursor']
22 |
23 | logger = logging.getLogger(__name__)
24 |
# TODO see note in Cursor.rowcount()
# Largest unsigned 64-bit value; Cursor.rowcount() treats an update count
# equal to this as "unknown" and reports -1 instead.
MAX_INT = 2 ** 64 - 1

ColumnDescription = collections.namedtuple('ColumnDescription', 'name type_code display_size internal_size precision scale null_ok')
"""Named tuple for representing results from :attr:`Cursor.description`."""
30 |
31 |
class Cursor(object):
    """Database cursor for executing queries and iterating over results.

    You should not construct this object manually, use :meth:`Connection.cursor() ` instead.
    """

    arraysize = 1
    """
    Read/write attribute specifying the number of rows to fetch
    at a time with :meth:`fetchmany`. It defaults to 1 meaning to
    fetch a single row at a time.
    """

    itersize = 2000
    """
    Read/write attribute specifying the number of rows to fetch
    from the backend at each network roundtrip during iteration
    on the cursor. The default is 2000.
    """

    def __init__(self, connection, id=None):
        self._connection = connection
        self._id = id  # server-side statement id; created lazily by execute()
        self._signature = None  # statement metadata (columns/parameters) from the server
        self._column_data_types = []  # per-column (field_name, rep, mutate_to, cast_from) tuples
        self._frame = None  # batch of rows currently buffered from the server
        self._pos = None  # index of the next unread row in the frame, or None
        self._closed = False
        self.arraysize = self.__class__.arraysize
        self.itersize = self.__class__.itersize
        self._updatecount = -1  # rows affected by the last DML; -1 means unknown

    def __del__(self):
        # Best-effort cleanup: once the connection is closed the statement
        # can no longer be closed on the server, so skip it in that case.
        if not self._connection._closed and not self._closed:
            self.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if not self._closed:
            self.close()

    def __iter__(self):
        # The cursor itself acts as an iterator over the result rows.
        return self

    def __next__(self):
        row = self.fetchone()
        if row is None:
            raise StopIteration
        return row

    next = __next__  # Python 2 iterator protocol alias

    def close(self):
        """Closes the cursor.
        No further operations are allowed once the cursor is closed.

        If the cursor is used in a ``with`` statement, this method will
        be automatically called at the end of the ``with`` block.
        """
        if self._closed:
            raise ProgrammingError('the cursor is already closed')
        if self._id is not None:
            # Release the server-side statement before dropping local state.
            self._connection._client.close_statement(self._connection._id, self._id)
            self._id = None
        self._signature = None
        self._column_data_types = []
        self._frame = None
        self._pos = None
        self._closed = True

    @property
    def closed(self):
        """Read-only attribute specifying if the cursor is closed or not."""
        return self._closed

    @property
    def description(self):
        """List of :class:`ColumnDescription` tuples describing the columns
        of the last result set, or ``None`` when no statement signature is
        available (DB API 2.0 ``description`` attribute)."""
        if self._signature is None:
            return None
        description = []
        for column in self._signature.columns:
            description.append(ColumnDescription(
                column.column_name,
                column.type.name,
                column.display_size,
                None,
                column.precision,
                column.scale,
                # nullable == 2 presumably maps to "nullability unknown"
                # (java.sql columnNullableUnknown) -- TODO confirm.
                None if column.nullable == 2 else bool(column.nullable),
            ))
        return description

    def _set_id(self, id):
        # Adopt a new server-side statement id; close the previously open
        # statement first if it differs.
        if self._id is not None and self._id != id:
            self._connection._client.close_statement(self._connection._id, self._id)
        self._id = id

    def _set_signature(self, signature):
        # Cache the statement signature and precompute the type-conversion
        # tuples for both result columns and bind parameters.
        self._signature = signature
        self._column_data_types = []
        self._parameter_data_types = []
        if signature is None:
            return

        for column in signature.columns:
            dtype = TypeHelper.from_class(column.column_class_name)
            self._column_data_types.append(dtype)

        for parameter in signature.parameters:
            dtype = TypeHelper.from_class(parameter.class_name)
            self._parameter_data_types.append(dtype)

    def _set_frame(self, frame):
        # Install a new result frame and reset the read position. An empty
        # frame is only legal when the server says the result set is done.
        self._frame = frame
        self._pos = None

        if frame is not None:
            if frame.rows:
                self._pos = 0
            elif not frame.done:
                raise InternalError('got an empty frame, but the statement is not done yet')

    def _fetch_next_frame(self):
        # Ask the server for the frame that follows the one just consumed.
        offset = self._frame.offset + len(self._frame.rows)
        frame = self._connection._client.fetch(
            self._connection._id, self._id,
            offset=offset, frame_max_size=self.itersize)
        self._set_frame(frame)

    def _process_results(self, results):
        # Apply the first result of an execute call to this cursor: adopt the
        # statement id (if owned), signature, first frame and update count.
        if results:
            result = results[0]
            if result.own_statement:
                self._set_id(result.statement_id)
            self._set_signature(result.signature if result.HasField('signature') else None)
            self._set_frame(result.first_frame if result.HasField('first_frame') else None)
            self._updatecount = result.update_count

    def _transform_parameters(self, parameters):
        # Convert Python bind values into common_pb2.TypedValue messages,
        # using the converter tuples precomputed in _set_signature().
        typed_parameters = []
        for value, data_type in zip(parameters, self._parameter_data_types):
            field_name, rep, mutate_to, cast_from = data_type
            typed_value = common_pb2.TypedValue()

            if value is None:
                typed_value.null = True
                typed_value.type = common_pb2.NULL
            else:
                typed_value.null = False

                # use the mutator function
                if mutate_to is not None:
                    value = mutate_to(value)

                typed_value.type = rep
                setattr(typed_value, field_name, value)

            typed_parameters.append(typed_value)
        return typed_parameters

    def execute(self, operation, parameters=None):
        """Executes a database operation.

        :param operation:
            The SQL statement to execute.

        :param parameters:
            Optional sequence of values to bind to the statement's
            placeholders. Without parameters the statement goes through a
            single prepare-and-execute round trip; with parameters it is
            prepared first and then executed.
        """
        if self._closed:
            raise ProgrammingError('the cursor is already closed')
        self._updatecount = -1
        self._set_frame(None)
        if parameters is None:
            if self._id is None:
                self._set_id(self._connection._client.create_statement(self._connection._id))
            results = self._connection._client.prepare_and_execute(
                self._connection._id, self._id,
                operation, first_frame_max_size=self.itersize)
            self._process_results(results)
        else:
            statement = self._connection._client.prepare(
                self._connection._id, operation)
            self._set_id(statement.id)
            self._set_signature(statement.signature)

            results = self._connection._client.execute(
                self._connection._id, self._id,
                statement.signature, self._transform_parameters(parameters),
                first_frame_max_size=self.itersize)
            self._process_results(results)

    def executemany(self, operation, seq_of_parameters):
        """Prepares ``operation`` once, then executes it for every parameter
        sequence in ``seq_of_parameters``.

        No result frames are requested (``first_frame_max_size=0``), so this
        is intended for statements that do not return rows.
        """
        if self._closed:
            raise ProgrammingError('the cursor is already closed')
        self._updatecount = -1
        self._set_frame(None)
        statement = self._connection._client.prepare(
            self._connection._id, operation, max_rows_total=0)
        self._set_id(statement.id)
        self._set_signature(statement.signature)
        for parameters in seq_of_parameters:
            self._connection._client.execute(
                self._connection._id, self._id,
                statement.signature, self._transform_parameters(parameters),
                first_frame_max_size=0)

    def _transform_row(self, row):
        """Transforms a Row into Python values.

        :param row:
            A ``common_pb2.Row`` object.

        :returns:
            A list of values casted into the correct Python types.

        :raises:
            NotImplementedError
        """
        tmp_row = []

        for i, column in enumerate(row.value):
            if column.has_array_value:
                raise NotImplementedError('array types are not supported')
            elif column.scalar_value.null:
                tmp_row.append(None)
            else:
                field_name, rep, mutate_to, cast_from = self._column_data_types[i]

                # get the value from the field_name
                value = getattr(column.scalar_value, field_name)

                # cast the value
                if cast_from is not None:
                    value = cast_from(value)

                tmp_row.append(value)
        return tmp_row

    def fetchone(self):
        """Fetches the next row of the result set, or returns ``None`` when
        no more rows are available. Transparently requests the next frame
        from the server once the current one is exhausted."""
        if self._frame is None:
            raise ProgrammingError('no select statement was executed')
        if self._pos is None:
            return None
        rows = self._frame.rows
        row = self._transform_row(rows[self._pos])
        self._pos += 1
        if self._pos >= len(rows):
            self._pos = None
            if not self._frame.done:
                self._fetch_next_frame()
        return row

    def fetchmany(self, size=None):
        """Fetches up to ``size`` rows (default :attr:`arraysize`); fewer may
        be returned when the result set is exhausted."""
        if size is None:
            size = self.arraysize
        rows = []
        while size > 0:
            row = self.fetchone()
            if row is None:
                break
            rows.append(row)
            size -= 1
        return rows

    def fetchall(self):
        """Fetches all remaining rows of the result set."""
        rows = []
        while True:
            row = self.fetchone()
            if row is None:
                break
            rows.append(row)
        return rows

    def setinputsizes(self, sizes):
        """Does nothing; present for DB API 2.0 compatibility."""
        pass

    def setoutputsize(self, size, column=None):
        """Does nothing; present for DB API 2.0 compatibility."""
        pass

    @property
    def connection(self):
        """Read-only attribute providing access to the :class:`Connection `
        object this cursor was created from."""
        return self._connection

    @property
    def rowcount(self):
        """Read-only attribute specifying the number of rows affected by
        the last executed DML statement or -1 if the number cannot be
        determined. Note that this will always be set to -1 for select
        queries."""
        # TODO instead of -1, this ends up being set to Integer.MAX_VALUE
        if self._updatecount == MAX_INT:
            return -1
        return self._updatecount

    @property
    def rownumber(self):
        """Read-only attribute providing the current 0-based index of the
        cursor in the result set or ``None`` if the index cannot be
        determined.

        The index can be seen as index of the cursor in a sequence
        (the result set). The next fetch operation will fetch the
        row indexed by :attr:`rownumber` in that sequence.
        """
        if self._frame is not None and self._pos is not None:
            return self._frame.offset + self._pos
        return self._pos
336 |
337 |
class DictCursor(Cursor):
    """A cursor variant whose rows are dictionaries keyed by column name."""

    def _transform_row(self, row):
        values = super(DictCursor, self)._transform_row(row)
        columns = self._signature.columns
        return {columns[index].column_name: value for index, value in enumerate(values)}
347 |
--------------------------------------------------------------------------------
/phoenixdb/errors.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Lukas Lalinsky
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | __all__ = [
16 | 'Warning', 'Error', 'InterfaceError', 'DatabaseError', 'DataError',
17 | 'OperationalError', 'IntegrityError', 'InternalError',
18 | 'ProgrammingError', 'NotSupportedError',
19 | ]
20 |
# On Python 2 the DB API exception hierarchy is rooted at StandardError;
# Python 3 removed that class, so fall back to Exception there.
try:
    _StandardError = StandardError
except NameError:
    _StandardError = Exception


class Warning(_StandardError):
    """Not used by this package, only defined for compatibility
    with DB API 2.0.

    Intentionally shadows the builtin ``Warning``, as required by the
    DB API 2.0 naming convention."""


class Error(_StandardError):
    """Exception that is the base class of all other error exceptions.
    You can use this to catch all errors with one single except statement."""

    def __init__(self, message, code=None, sqlstate=None, cause=None):
        # Store all four values in ``args`` so they survive pickling and show
        # up in the default exception repr. The previous code called
        # ``super(_StandardError, self)``, which skips Exception's own slot
        # in the MRO and only worked by accident (it resolved to
        # BaseException.__init__).
        super(Error, self).__init__(message, code, sqlstate, cause)

    @property
    def message(self):
        """The human-readable error message."""
        return self.args[0]

    @property
    def code(self):
        """The numeric error code, or ``None``."""
        return self.args[1]

    @property
    def sqlstate(self):
        """The SQLSTATE code associated with the error, or ``None``."""
        return self.args[2]

    @property
    def cause(self):
        """The underlying cause of the error, or ``None``."""
        return self.args[3]
54 |
55 |
class InterfaceError(Error):
    """Raised for errors related to the database interface (this driver)
    rather than the database itself."""
59 |
60 |
class DatabaseError(Error):
    """Raised for errors related to the database itself."""
63 |
64 |
class DataError(DatabaseError):
    """Raised for problems with the processed data itself, such as
    division by zero or a numeric value being out of range."""
69 |
70 |
class OperationalError(DatabaseError):
    """Raised for errors related to the database's operation that are not
    necessarily under the programmer's control: an unexpected disconnect,
    a missing data source name, a transaction that could not be processed,
    a memory allocation failure during processing, and so on."""
77 |
78 |
class IntegrityError(DatabaseError):
    """Raised when the relational integrity of the database is affected,
    for example when a foreign key check fails."""
81 |
82 |
class InternalError(DatabaseError):
    """Raised when the database runs into an internal problem."""
85 |
86 |
class ProgrammingError(DatabaseError):
    """Raised for programming errors, e.g. table not found, syntax error, etc."""
89 |
90 |
class NotSupportedError(DatabaseError):
    """Raised when an API or feature is used that the database
    does not support."""
93 |
--------------------------------------------------------------------------------
/phoenixdb/tests/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | import unittest
3 | import phoenixdb
4 |
5 | TEST_DB_URL = os.environ.get('PHOENIXDB_TEST_DB_URL')
6 |
7 |
@unittest.skipIf(TEST_DB_URL is None, "these tests require the PHOENIXDB_TEST_DB_URL environment variable set to a clean database")
class DatabaseTestCase(unittest.TestCase):
    """Common scaffolding for tests that need a live Phoenix database.

    Opens an autocommit connection per test and closes it afterwards;
    helper methods (re)create tables and register their DROP cleanup.
    """

    def setUp(self):
        self.conn = phoenixdb.connect(TEST_DB_URL, autocommit=True)
        self.cleanup_tables = []

    def tearDown(self):
        self.doCleanups()
        self.conn.close()

    def createTable(self, name, columns):
        # (Re)create the table and ensure it is dropped when the test ends.
        with self.conn.cursor() as cur:
            cur.execute("DROP TABLE IF EXISTS {}".format(name))
            cur.execute("CREATE TABLE {} ({})".format(name, columns))
        self.addTableCleanup(name)

    def addTableCleanup(self, name):
        # Register a deferred DROP for *name*, executed by doCleanups().
        def _drop():
            with self.conn.cursor() as cur:
                cur.execute("DROP TABLE IF EXISTS {}".format(name))
        self.addCleanup(_drop)
30 |
--------------------------------------------------------------------------------
/phoenixdb/tests/dbapi20.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | ''' Python DB API 2.0 driver compliance unit test suite.
3 |
4 | This software is Public Domain and may be used without restrictions.
5 |
6 | "Now we have booze and barflies entering the discussion, plus rumours of
7 | DBAs on drugs... and I won't tell you what flashes through my mind each
8 | time I read the subject line with 'Anal Compliance' in it. All around
9 | this is turning out to be a thoroughly unwholesome unit test."
10 |
11 | -- Ian Bicking
12 | '''
13 |
14 | __version__ = '1.14.3'
15 |
16 | import unittest
17 | import time
18 | import sys
19 |
20 | if sys.version[0] >= '3': #python 3.x
21 | _BaseException = Exception
22 | def _failUnless(self, expr, msg=None):
23 | self.assertTrue(expr, msg)
24 | else: #python 2.x
25 | from exceptions import StandardError as _BaseException
26 | def _failUnless(self, expr, msg=None):
27 | self.failUnless(expr, msg) ## deprecated since Python 2.6
28 |
def str2bytes(sval):
    """Return *sval* encoded as latin-1 bytes (portable across Python 2/3)."""
    # On Python 2 a plain str must be promoted to unicode before it can
    # be encoded; on Python 3 str encodes directly to bytes.
    if isinstance(sval, str) and sys.version_info < (3,0):
        sval = sval.decode("latin1")
    return sval.encode("latin1")
33 |
class DatabaseAPI20Test(unittest.TestCase):
    ''' Test a database self.driver for DB API 2.0 compatibility.
    This implementation tests Gadfly, but the TestCase
    is structured so that other self.drivers can subclass this
    test case to ensure compliance with the DB-API. It is
    expected that this TestCase may be expanded in the future
    if ambiguities or edge conditions are discovered.

    The 'Optional Extensions' are not yet being tested.

    self.drivers should subclass this test, overriding setUp, tearDown,
    self.driver, connect_args and connect_kw_args. Class specification
    should be as follows:

    import dbapi20
    class mytest(dbapi20.DatabaseAPI20Test):
       [...]

    Don't 'import DatabaseAPI20Test from dbapi20', or you will
    confuse the unit tester - just 'import dbapi20'.

    NOTE(review): there are two definitions of test_nextset below; the
    second (which raises NotImplementedError) shadows the first, so
    drivers must override test_nextset themselves.
    '''

    # The self.driver module. This should be the module where the 'connect'
    # method is to be found
    driver = None
    connect_args = () # List of arguments to pass to connect
    connect_kw_args = {} # Keyword arguments for connect
    table_prefix = 'dbapi20test_' # If you need to specify a prefix for tables

    ddl1 = 'create table %sbooze (name varchar(20))' % table_prefix
    ddl2 = 'create table %sbarflys (name varchar(20), drink varchar(30))' % table_prefix
    xddl1 = 'drop table %sbooze' % table_prefix
    xddl2 = 'drop table %sbarflys' % table_prefix
    insert = 'insert'

    lowerfunc = 'lower' # Name of stored procedure to convert string->lowercase

    # Some drivers may need to override these helpers, for example adding
    # a 'commit' after the execute.
    def executeDDL1(self,cursor):
        cursor.execute(self.ddl1)

    def executeDDL2(self,cursor):
        cursor.execute(self.ddl2)

    def setUp(self):
        ''' self.drivers should override this method to perform required setup
        if any is necessary, such as creating the database.
        '''
        pass

    def tearDown(self):
        ''' self.drivers should override this method to perform required cleanup
        if any is necessary, such as deleting the test database.
        The default drops the tables that may be created.
        '''
        try:
            con = self._connect()
            try:
                cur = con.cursor()
                for ddl in (self.xddl1,self.xddl2):
                    try:
                        cur.execute(ddl)
                        con.commit()
                    except self.driver.Error:
                        # Assume table didn't exist. Other tests will check if
                        # execute is busted.
                        pass
            finally:
                con.close()
        except _BaseException:
            pass

    def _connect(self):
        try:
            r = self.driver.connect(
                *self.connect_args,**self.connect_kw_args
                )
        except AttributeError:
            self.fail("No connect method found in self.driver module")
        return r

    def test_connect(self):
        con = self._connect()
        con.close()

    def test_apilevel(self):
        try:
            # Must exist
            apilevel = self.driver.apilevel
            # Must equal 2.0
            self.assertEqual(apilevel,'2.0')
        except AttributeError:
            self.fail("Driver doesn't define apilevel")

    def test_threadsafety(self):
        try:
            # Must exist
            threadsafety = self.driver.threadsafety
            # Must be a valid value
            _failUnless(self, threadsafety in (0,1,2,3))
        except AttributeError:
            self.fail("Driver doesn't define threadsafety")

    def test_paramstyle(self):
        try:
            # Must exist
            paramstyle = self.driver.paramstyle
            # Must be a valid value
            _failUnless(self, paramstyle in (
                'qmark','numeric','named','format','pyformat'
                ))
        except AttributeError:
            self.fail("Driver doesn't define paramstyle")

    def test_Exceptions(self):
        # Make sure required exceptions exist, and are in the
        # defined hierarchy.
        # NOTE(review): this compares only the first character of
        # sys.version, which is fine for 2.x/3.x but not beyond.
        if sys.version[0] == '3': #under Python 3 StandardError no longer exists
            self.assertTrue(issubclass(self.driver.Warning,Exception))
            self.assertTrue(issubclass(self.driver.Error,Exception))
        else:
            self.failUnless(issubclass(self.driver.Warning,StandardError))
            self.failUnless(issubclass(self.driver.Error,StandardError))

        _failUnless(self,
            issubclass(self.driver.InterfaceError,self.driver.Error)
            )
        _failUnless(self,
            issubclass(self.driver.DatabaseError,self.driver.Error)
            )
        _failUnless(self,
            issubclass(self.driver.OperationalError,self.driver.Error)
            )
        _failUnless(self,
            issubclass(self.driver.IntegrityError,self.driver.Error)
            )
        _failUnless(self,
            issubclass(self.driver.InternalError,self.driver.Error)
            )
        _failUnless(self,
            issubclass(self.driver.ProgrammingError,self.driver.Error)
            )
        _failUnless(self,
            issubclass(self.driver.NotSupportedError,self.driver.Error)
            )

    def test_ExceptionsAsConnectionAttributes(self):
        # OPTIONAL EXTENSION
        # Test for the optional DB API 2.0 extension, where the exceptions
        # are exposed as attributes on the Connection object
        # I figure this optional extension will be implemented by any
        # driver author who is using this test suite, so it is enabled
        # by default.
        con = self._connect()
        drv = self.driver
        _failUnless(self,con.Warning is drv.Warning)
        _failUnless(self,con.Error is drv.Error)
        _failUnless(self,con.InterfaceError is drv.InterfaceError)
        _failUnless(self,con.DatabaseError is drv.DatabaseError)
        _failUnless(self,con.OperationalError is drv.OperationalError)
        _failUnless(self,con.IntegrityError is drv.IntegrityError)
        _failUnless(self,con.InternalError is drv.InternalError)
        _failUnless(self,con.ProgrammingError is drv.ProgrammingError)
        _failUnless(self,con.NotSupportedError is drv.NotSupportedError)


    def test_commit(self):
        con = self._connect()
        try:
            # Commit must work, even if it doesn't do anything
            con.commit()
        finally:
            con.close()

    def test_rollback(self):
        con = self._connect()
        # If rollback is defined, it should either work or throw
        # the documented exception
        if hasattr(con,'rollback'):
            try:
                con.rollback()
            except self.driver.NotSupportedError:
                pass

    def test_cursor(self):
        con = self._connect()
        try:
            cur = con.cursor()
        finally:
            con.close()

    def test_cursor_isolation(self):
        con = self._connect()
        try:
            # Make sure cursors created from the same connection have
            # the documented transaction isolation level
            cur1 = con.cursor()
            cur2 = con.cursor()
            self.executeDDL1(cur1)
            cur1.execute("%s into %sbooze values ('Victoria Bitter')" % (
                self.insert, self.table_prefix
                ))
            cur2.execute("select name from %sbooze" % self.table_prefix)
            booze = cur2.fetchall()
            self.assertEqual(len(booze),1)
            self.assertEqual(len(booze[0]),1)
            self.assertEqual(booze[0][0],'Victoria Bitter')
        finally:
            con.close()

    def test_description(self):
        con = self._connect()
        try:
            cur = con.cursor()
            self.executeDDL1(cur)
            self.assertEqual(cur.description,None,
                'cursor.description should be none after executing a '
                'statement that can return no rows (such as DDL)'
                )
            cur.execute('select name from %sbooze' % self.table_prefix)
            self.assertEqual(len(cur.description),1,
                'cursor.description describes too many columns'
                )
            self.assertEqual(len(cur.description[0]),7,
                'cursor.description[x] tuples must have 7 elements'
                )
            self.assertEqual(cur.description[0][0].lower(),'name',
                'cursor.description[x][0] must return column name'
                )
            self.assertEqual(cur.description[0][1],self.driver.STRING,
                'cursor.description[x][1] must return column type. Got %r'
                % cur.description[0][1]
                )

            # Make sure self.description gets reset
            self.executeDDL2(cur)
            self.assertEqual(cur.description,None,
                'cursor.description not being set to None when executing '
                'no-result statements (eg. DDL)'
                )
        finally:
            con.close()

    def test_rowcount(self):
        con = self._connect()
        try:
            cur = con.cursor()
            self.executeDDL1(cur)
            _failUnless(self,cur.rowcount in (-1,0), # Bug #543885
                'cursor.rowcount should be -1 or 0 after executing no-result '
                'statements'
                )
            cur.execute("%s into %sbooze values ('Victoria Bitter')" % (
                self.insert, self.table_prefix
                ))
            _failUnless(self,cur.rowcount in (-1,1),
                'cursor.rowcount should == number or rows inserted, or '
                'set to -1 after executing an insert statement'
                )
            cur.execute("select name from %sbooze" % self.table_prefix)
            _failUnless(self,cur.rowcount in (-1,1),
                'cursor.rowcount should == number of rows returned, or '
                'set to -1 after executing a select statement'
                )
            self.executeDDL2(cur)
            _failUnless(self,cur.rowcount in (-1,0), # Bug #543885
                'cursor.rowcount should be -1 or 0 after executing no-result '
                'statements'
                )
        finally:
            con.close()

    lower_func = 'lower'
    def test_callproc(self):
        con = self._connect()
        try:
            cur = con.cursor()
            if self.lower_func and hasattr(cur,'callproc'):
                r = cur.callproc(self.lower_func,('FOO',))
                self.assertEqual(len(r),1)
                self.assertEqual(r[0],'FOO')
                r = cur.fetchall()
                self.assertEqual(len(r),1,'callproc produced no result set')
                self.assertEqual(len(r[0]),1,
                    'callproc produced invalid result set'
                    )
                self.assertEqual(r[0][0],'foo',
                    'callproc produced invalid results'
                    )
        finally:
            con.close()

    def test_close(self):
        con = self._connect()
        try:
            cur = con.cursor()
        finally:
            con.close()

        # cursor.execute should raise an Error if called after connection
        # closed
        self.assertRaises(self.driver.Error,self.executeDDL1,cur)

        # connection.commit should raise an Error if called after connection'
        # closed.'
        self.assertRaises(self.driver.Error,con.commit)

    def test_non_idempotent_close(self):
        con = self._connect()
        con.close()
        # connection.close should raise an Error if called more than once
        #!!! reasonable persons differ about the usefulness of this test and this feature !!!
        self.assertRaises(self.driver.Error,con.close)

    def test_execute(self):
        con = self._connect()
        try:
            cur = con.cursor()
            self._paraminsert(cur)
        finally:
            con.close()

    def _paraminsert(self,cur):
        self.executeDDL2(cur)
        cur.execute("%s into %sbarflys values ('Victoria Bitter', 'thi%%s :may ca%%(u)se? troub:1e')" % (
            self.insert, self.table_prefix
            ))
        _failUnless(self,cur.rowcount in (-1,1))

        if self.driver.paramstyle == 'qmark':
            cur.execute(
                "%s into %sbarflys values (?, 'thi%%s :may ca%%(u)se? troub:1e')" % (self.insert, self.table_prefix),
                ("Cooper's",)
                )
        elif self.driver.paramstyle == 'numeric':
            cur.execute(
                "%s into %sbarflys values (:1, 'thi%%s :may ca%%(u)se? troub:1e')" % (self.insert, self.table_prefix),
                ("Cooper's",)
                )
        elif self.driver.paramstyle == 'named':
            cur.execute(
                "%s into %sbarflys values (:beer, 'thi%%s :may ca%%(u)se? troub:1e')" % (self.insert, self.table_prefix),
                {'beer':"Cooper's"}
                )
        elif self.driver.paramstyle == 'format':
            cur.execute(
                "%s into %sbarflys values (%%s, 'thi%%%%s :may ca%%%%(u)se? troub:1e')" % (self.insert, self.table_prefix),
                ("Cooper's",)
                )
        elif self.driver.paramstyle == 'pyformat':
            cur.execute(
                "%s into %sbarflys values (%%(beer)s, 'thi%%%%s :may ca%%%%(u)se? troub:1e')" % (self.insert, self.table_prefix),
                {'beer':"Cooper's"}
                )
        else:
            self.fail('Invalid paramstyle')
        _failUnless(self,cur.rowcount in (-1,1))

        cur.execute('select name, drink from %sbarflys' % self.table_prefix)
        res = cur.fetchall()
        self.assertEqual(len(res),2,'cursor.fetchall returned too few rows')
        beers = [res[0][0],res[1][0]]
        beers.sort()
        self.assertEqual(beers[0],"Cooper's",
            'cursor.fetchall retrieved incorrect data, or data inserted '
            'incorrectly'
            )
        self.assertEqual(beers[1],"Victoria Bitter",
            'cursor.fetchall retrieved incorrect data, or data inserted '
            'incorrectly'
            )
        trouble = "thi%s :may ca%(u)se? troub:1e"
        self.assertEqual(res[0][1], trouble,
            'cursor.fetchall retrieved incorrect data, or data inserted '
            'incorrectly. Got=%s, Expected=%s' % (repr(res[0][1]), repr(trouble)))
        self.assertEqual(res[1][1], trouble,
            'cursor.fetchall retrieved incorrect data, or data inserted '
            'incorrectly. Got=%s, Expected=%s' % (repr(res[1][1]), repr(trouble)
            ))

    def test_executemany(self):
        con = self._connect()
        try:
            cur = con.cursor()
            self.executeDDL1(cur)
            largs = [ ("Cooper's",) , ("Boag's",) ]
            margs = [ {'beer': "Cooper's"}, {'beer': "Boag's"} ]
            if self.driver.paramstyle == 'qmark':
                cur.executemany(
                    '%s into %sbooze values (?)' % (self.insert, self.table_prefix),
                    largs
                    )
            elif self.driver.paramstyle == 'numeric':
                cur.executemany(
                    '%s into %sbooze values (:1)' % (self.insert, self.table_prefix),
                    largs
                    )
            elif self.driver.paramstyle == 'named':
                cur.executemany(
                    '%s into %sbooze values (:beer)' % (self.insert, self.table_prefix),
                    margs
                    )
            elif self.driver.paramstyle == 'format':
                cur.executemany(
                    '%s into %sbooze values (%%s)' % (self.insert, self.table_prefix),
                    largs
                    )
            elif self.driver.paramstyle == 'pyformat':
                cur.executemany(
                    '%s into %sbooze values (%%(beer)s)' % (
                        self.insert, self.table_prefix
                        ),
                    margs
                    )
            else:
                self.fail('Unknown paramstyle')
            _failUnless(self,cur.rowcount in (-1,2),
                'insert using cursor.executemany set cursor.rowcount to '
                'incorrect value %r' % cur.rowcount
                )
            cur.execute('select name from %sbooze' % self.table_prefix)
            res = cur.fetchall()
            self.assertEqual(len(res),2,
                'cursor.fetchall retrieved incorrect number of rows'
                )
            beers = [res[0][0],res[1][0]]
            beers.sort()
            self.assertEqual(beers[0],"Boag's",'incorrect data retrieved')
            self.assertEqual(beers[1],"Cooper's",'incorrect data retrieved')
        finally:
            con.close()

    def test_fetchone(self):
        con = self._connect()
        try:
            cur = con.cursor()

            # cursor.fetchone should raise an Error if called before
            # executing a select-type query
            self.assertRaises(self.driver.Error,cur.fetchone)

            # cursor.fetchone should raise an Error if called after
            # executing a query that cannot return rows
            self.executeDDL1(cur)
            self.assertRaises(self.driver.Error,cur.fetchone)

            cur.execute('select name from %sbooze' % self.table_prefix)
            self.assertEqual(cur.fetchone(),None,
                'cursor.fetchone should return None if a query retrieves '
                'no rows'
                )
            _failUnless(self,cur.rowcount in (-1,0))

            # cursor.fetchone should raise an Error if called after
            # executing a query that cannot return rows
            cur.execute("%s into %sbooze values ('Victoria Bitter')" % (
                self.insert, self.table_prefix
                ))
            self.assertRaises(self.driver.Error,cur.fetchone)

            cur.execute('select name from %sbooze' % self.table_prefix)
            r = cur.fetchone()
            self.assertEqual(len(r),1,
                'cursor.fetchone should have retrieved a single row'
                )
            self.assertEqual(r[0],'Victoria Bitter',
                'cursor.fetchone retrieved incorrect data'
                )
            self.assertEqual(cur.fetchone(),None,
                'cursor.fetchone should return None if no more rows available'
                )
            _failUnless(self,cur.rowcount in (-1,1))
        finally:
            con.close()

    samples = [
        'Carlton Cold',
        'Carlton Draft',
        'Mountain Goat',
        'Redback',
        'Victoria Bitter',
        'XXXX'
        ]

    def _populate(self):
        ''' Return a list of sql commands to setup the DB for the fetch
        tests.
        '''
        populate = [
            "%s into %sbooze values ('%s')" % (self.insert, self.table_prefix, s)
            for s in self.samples
            ]
        return populate

    def test_fetchmany(self):
        con = self._connect()
        try:
            cur = con.cursor()

            # cursor.fetchmany should raise an Error if called without
            # issuing a query
            self.assertRaises(self.driver.Error,cur.fetchmany,4)

            self.executeDDL1(cur)
            for sql in self._populate():
                cur.execute(sql)

            cur.execute('select name from %sbooze' % self.table_prefix)
            r = cur.fetchmany()
            self.assertEqual(len(r),1,
                'cursor.fetchmany retrieved incorrect number of rows, '
                'default of arraysize is one.'
                )
            cur.arraysize=10
            r = cur.fetchmany(3) # Should get 3 rows
            self.assertEqual(len(r),3,
                'cursor.fetchmany retrieved incorrect number of rows'
                )
            r = cur.fetchmany(4) # Should get 2 more
            self.assertEqual(len(r),2,
                'cursor.fetchmany retrieved incorrect number of rows'
                )
            r = cur.fetchmany(4) # Should be an empty sequence
            self.assertEqual(len(r),0,
                'cursor.fetchmany should return an empty sequence after '
                'results are exhausted'
                )
            _failUnless(self,cur.rowcount in (-1,6))

            # Same as above, using cursor.arraysize
            cur.arraysize=4
            cur.execute('select name from %sbooze' % self.table_prefix)
            r = cur.fetchmany() # Should get 4 rows
            self.assertEqual(len(r),4,
                'cursor.arraysize not being honoured by fetchmany'
                )
            r = cur.fetchmany() # Should get 2 more
            self.assertEqual(len(r),2)
            r = cur.fetchmany() # Should be an empty sequence
            self.assertEqual(len(r),0)
            _failUnless(self,cur.rowcount in (-1,6))

            cur.arraysize=6
            cur.execute('select name from %sbooze' % self.table_prefix)
            rows = cur.fetchmany() # Should get all rows
            _failUnless(self,cur.rowcount in (-1,6))
            self.assertEqual(len(rows),6)
            self.assertEqual(len(rows),6)
            rows = [r[0] for r in rows]
            rows.sort()

            # Make sure we get the right data back out
            for i in range(0,6):
                self.assertEqual(rows[i],self.samples[i],
                    'incorrect data retrieved by cursor.fetchmany'
                    )

            rows = cur.fetchmany() # Should return an empty list
            self.assertEqual(len(rows),0,
                'cursor.fetchmany should return an empty sequence if '
                'called after the whole result set has been fetched'
                )
            _failUnless(self,cur.rowcount in (-1,6))

            self.executeDDL2(cur)
            cur.execute('select name from %sbarflys' % self.table_prefix)
            r = cur.fetchmany() # Should get empty sequence
            self.assertEqual(len(r),0,
                'cursor.fetchmany should return an empty sequence if '
                'query retrieved no rows'
                )
            _failUnless(self,cur.rowcount in (-1,0))

        finally:
            con.close()

    def test_fetchall(self):
        con = self._connect()
        try:
            cur = con.cursor()
            # cursor.fetchall should raise an Error if called
            # without executing a query that may return rows (such
            # as a select)
            self.assertRaises(self.driver.Error, cur.fetchall)

            self.executeDDL1(cur)
            for sql in self._populate():
                cur.execute(sql)

            # cursor.fetchall should raise an Error if called
            # after executing a statement that cannot return rows
            self.assertRaises(self.driver.Error,cur.fetchall)

            cur.execute('select name from %sbooze' % self.table_prefix)
            rows = cur.fetchall()
            _failUnless(self,cur.rowcount in (-1,len(self.samples)))
            self.assertEqual(len(rows),len(self.samples),
                'cursor.fetchall did not retrieve all rows'
                )
            rows = [r[0] for r in rows]
            rows.sort()
            for i in range(0,len(self.samples)):
                self.assertEqual(rows[i],self.samples[i],
                'cursor.fetchall retrieved incorrect rows'
                )
            rows = cur.fetchall()
            self.assertEqual(
                len(rows),0,
                'cursor.fetchall should return an empty list if called '
                'after the whole result set has been fetched'
                )
            _failUnless(self,cur.rowcount in (-1,len(self.samples)))

            self.executeDDL2(cur)
            cur.execute('select name from %sbarflys' % self.table_prefix)
            rows = cur.fetchall()
            _failUnless(self,cur.rowcount in (-1,0))
            self.assertEqual(len(rows),0,
                'cursor.fetchall should return an empty list if '
                'a select query returns no rows'
                )

        finally:
            con.close()

    def test_mixedfetch(self):
        con = self._connect()
        try:
            cur = con.cursor()
            self.executeDDL1(cur)
            for sql in self._populate():
                cur.execute(sql)

            cur.execute('select name from %sbooze' % self.table_prefix)
            rows1 = cur.fetchone()
            rows23 = cur.fetchmany(2)
            rows4 = cur.fetchone()
            rows56 = cur.fetchall()
            _failUnless(self,cur.rowcount in (-1,6))
            self.assertEqual(len(rows23),2,
                'fetchmany returned incorrect number of rows'
                )
            self.assertEqual(len(rows56),2,
                'fetchall returned incorrect number of rows'
                )

            rows = [rows1[0]]
            rows.extend([rows23[0][0],rows23[1][0]])
            rows.append(rows4[0])
            rows.extend([rows56[0][0],rows56[1][0]])
            rows.sort()
            for i in range(0,len(self.samples)):
                self.assertEqual(rows[i],self.samples[i],
                    'incorrect data retrieved or inserted'
                    )
        finally:
            con.close()

    def help_nextset_setUp(self,cur):
        ''' Should create a procedure called deleteme
        that returns two result sets, first the
        number of rows in booze then "name from booze"
        '''
        raise NotImplementedError('Helper not implemented')
        #sql="""
        #    create procedure deleteme as
        #    begin
        #        select count(*) from booze
        #        select name from booze
        #    end
        #"""
        #cur.execute(sql)

    def help_nextset_tearDown(self,cur):
        'If cleaning up is needed after nextSetTest'
        raise NotImplementedError('Helper not implemented')
        #cur.execute("drop procedure deleteme")

    # NOTE(review): this definition is shadowed by the second test_nextset
    # below and therefore never runs; it serves as a template for drivers
    # that override test_nextset with a real implementation.
    def test_nextset(self):
        con = self._connect()
        try:
            cur = con.cursor()
            if not hasattr(cur,'nextset'):
                return

            try:
                self.executeDDL1(cur)
                sql=self._populate()  # NOTE(review): dead assignment; overwritten by the loop below
                for sql in self._populate():
                    cur.execute(sql)

                self.help_nextset_setUp(cur)

                cur.callproc('deleteme')
                numberofrows=cur.fetchone()
                assert numberofrows[0]== len(self.samples)
                assert cur.nextset()
                names=cur.fetchall()
                assert len(names) == len(self.samples)
                s=cur.nextset()
                assert s == None,'No more return sets, should return None'
            finally:
                self.help_nextset_tearDown(cur)

        finally:
            con.close()

    def test_nextset(self):
        raise NotImplementedError('Drivers need to override this test')

    def test_arraysize(self):
        # Not much here - rest of the tests for this are in test_fetchmany
        con = self._connect()
        try:
            cur = con.cursor()
            _failUnless(self,hasattr(cur,'arraysize'),
                'cursor.arraysize must be defined'
                )
        finally:
            con.close()

    def test_setinputsizes(self):
        con = self._connect()
        try:
            cur = con.cursor()
            cur.setinputsizes( (25,) )
            self._paraminsert(cur) # Make sure cursor still works
        finally:
            con.close()

    def test_setoutputsize_basic(self):
        # Basic test is to make sure setoutputsize doesn't blow up
        con = self._connect()
        try:
            cur = con.cursor()
            cur.setoutputsize(1000)
            cur.setoutputsize(2000,0)
            self._paraminsert(cur) # Make sure the cursor still works
        finally:
            con.close()

    def test_setoutputsize(self):
        # Real test for setoutputsize is driver dependent
        raise NotImplementedError('Driver needed to override this test')

    def test_None(self):
        con = self._connect()
        try:
            cur = con.cursor()
            self.executeDDL1(cur)
            cur.execute("%s into %sbarflys values ('a', NULL)" % (self.insert, self.table_prefix))
            cur.execute('select drink from %sbarflys' % self.table_prefix)
            r = cur.fetchall()
            self.assertEqual(len(r),1)
            self.assertEqual(len(r[0]),1)
            self.assertEqual(r[0][0],None,'NULL value not returned as None')
        finally:
            con.close()

    def test_Date(self):
        d1 = self.driver.Date(2002,12,25)
        d2 = self.driver.DateFromTicks(time.mktime((2002,12,25,0,0,0,0,0,0)))
        # Can we assume this? API doesn't specify, but it seems implied
        # self.assertEqual(str(d1),str(d2))

    def test_Time(self):
        t1 = self.driver.Time(13,45,30)
        t2 = self.driver.TimeFromTicks(time.mktime((2001,1,1,13,45,30,0,0,0)))
        # Can we assume this? API doesn't specify, but it seems implied
        # self.assertEqual(str(t1),str(t2))

    def test_Timestamp(self):
        t1 = self.driver.Timestamp(2002,12,25,13,45,30)
        t2 = self.driver.TimestampFromTicks(
            time.mktime((2002,12,25,13,45,30,0,0,0))
            )
        # Can we assume this? API doesn't specify, but it seems implied
        # self.assertEqual(str(t1),str(t2))

    def test_Binary(self):
        b = self.driver.Binary(str2bytes('Something'))
        b = self.driver.Binary(str2bytes(''))

    def test_STRING(self):
        _failUnless(self, hasattr(self.driver,'STRING'),
            'module.STRING must be defined'
            )

    def test_BINARY(self):
        _failUnless(self, hasattr(self.driver,'BINARY'),
            'module.BINARY must be defined.'
            )

    def test_NUMBER(self):
        _failUnless(self, hasattr(self.driver,'NUMBER'),
            'module.NUMBER must be defined.'
            )

    def test_DATETIME(self):
        _failUnless(self, hasattr(self.driver,'DATETIME'),
            'module.DATETIME must be defined.'
            )

    def test_ROWID(self):
        _failUnless(self, hasattr(self.driver,'ROWID'),
            'module.ROWID must be defined.'
            )
842 |
--------------------------------------------------------------------------------
/phoenixdb/tests/test_avatica.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from phoenixdb.avatica.client import parse_url, urlparse
3 |
4 |
class ParseUrlTest(unittest.TestCase):
    """Checks that parse_url() normalizes the supported URL spellings."""

    def test_parse_url(self):
        cases = [
            ('localhost', 'http://localhost:8765/'),
            ('localhost:2222', 'http://localhost:2222/'),
            ('http://localhost:2222/', 'http://localhost:2222/'),
        ]
        for raw, expected in cases:
            self.assertEqual(urlparse.urlparse(expected), parse_url(raw))
11 |
--------------------------------------------------------------------------------
/phoenixdb/tests/test_connection.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import phoenixdb
3 | from phoenixdb.tests import TEST_DB_URL
4 |
5 |
@unittest.skipIf(TEST_DB_URL is None, "these tests require the PHOENIXDB_TEST_DB_URL environment variable set to a clean database")
class PhoenixConnectionTest(unittest.TestCase):
    """Tests for keyword-argument handling in phoenixdb.connect()."""

    def _connect(self, connect_kw_args):
        """Open a connection to the test database, failing the test on error."""
        try:
            r = phoenixdb.connect(TEST_DB_URL, **connect_kw_args)
        except AttributeError:
            self.fail("Failed to connect")
        return r

    def test_connection_credentials(self):
        # user/password should be routed to the Avatica connection args,
        # while other kwargs (readonly) are kept in the filtered args.
        connect_kw_args = {'user': 'SCOTT', 'password': 'TIGER', 'readonly': 'True'}
        con = self._connect(connect_kw_args)
        try:
            # BUG FIX: the original assertion messages were ungrammatical
            # ("Should have extract") and referred to a kwarg 'foo' that
            # this test never passes (the filtered kwarg is 'readonly').
            self.assertEqual(
                con._connection_args, {'user': 'SCOTT', 'password': 'TIGER'},
                'Should have extracted user and password')
            self.assertEqual(
                con._filtered_args, {'readonly': 'True'},
                'Should not have extracted readonly')
        finally:
            con.close()
28 |
--------------------------------------------------------------------------------
/phoenixdb/tests/test_db.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import phoenixdb
3 | import phoenixdb.cursor
4 | from phoenixdb.errors import InternalError
5 | from phoenixdb.tests import TEST_DB_URL
6 |
7 |
@unittest.skipIf(TEST_DB_URL is None, "these tests require the PHOENIXDB_TEST_DB_URL environment variable set to a clean database")
class PhoenixDatabaseTest(unittest.TestCase):
    """End-to-end query tests against a live Phoenix database."""

    def _populate_test_table(self, db):
        # (Re)create the 'test' table and fill it with rows 0..9; shared by
        # the literal- and parameter-based SELECT tests below.
        with db.cursor() as cursor:
            cursor.execute("DROP TABLE IF EXISTS test")
            cursor.execute("CREATE TABLE test (id INTEGER PRIMARY KEY, text VARCHAR)")
            cursor.executemany("UPSERT INTO test VALUES (?, ?)", [[i, 'text {}'.format(i)] for i in range(10)])

    def test_select_literal(self):
        """SELECT with an inline literal predicate returns the expected rows."""
        db = phoenixdb.connect(TEST_DB_URL, autocommit=True)
        self.addCleanup(db.close)
        self._populate_test_table(db)

        with db.cursor() as cursor:
            # A small itersize forces multiple fetch round-trips to the server.
            cursor.itersize = 4
            cursor.execute("SELECT * FROM test WHERE id>1 ORDER BY id")
            self.assertEqual(cursor.fetchall(), [[i, 'text {}'.format(i)] for i in range(2, 10)])

    def test_select_parameter(self):
        """SELECT with a bound parameter returns the expected rows."""
        db = phoenixdb.connect(TEST_DB_URL, autocommit=True)
        self.addCleanup(db.close)
        self._populate_test_table(db)

        with db.cursor() as cursor:
            cursor.itersize = 4
            cursor.execute("SELECT * FROM test WHERE id>? ORDER BY id", [1])
            self.assertEqual(cursor.fetchall(), [[i, 'text {}'.format(i)] for i in range(2, 10)])

    def _check_dict_cursor(self, cursor):
        # Verify that the given cursor returns rows as dictionaries keyed by
        # the (upper-cased) column names.
        cursor.execute("DROP TABLE IF EXISTS test")
        cursor.execute("CREATE TABLE test (id INTEGER PRIMARY KEY, text VARCHAR)")
        cursor.execute("UPSERT INTO test VALUES (?, ?)", [1, 'text 1'])
        cursor.execute("SELECT * FROM test ORDER BY id")
        self.assertEqual(cursor.fetchall(), [{'ID': 1, 'TEXT': 'text 1'}])

    def test_dict_cursor_default_parameter(self):
        """cursor_factory passed to connect() applies to all cursors."""
        db = phoenixdb.connect(TEST_DB_URL, autocommit=True, cursor_factory=phoenixdb.cursor.DictCursor)
        self.addCleanup(db.close)

        with db.cursor() as cursor:
            self._check_dict_cursor(cursor)

    def test_dict_cursor_default_attribute(self):
        """cursor_factory set as a connection attribute applies to new cursors."""
        db = phoenixdb.connect(TEST_DB_URL, autocommit=True)
        db.cursor_factory = phoenixdb.cursor.DictCursor
        self.addCleanup(db.close)

        with db.cursor() as cursor:
            self._check_dict_cursor(cursor)

    def test_dict_cursor(self):
        """cursor_factory passed to cursor() applies to that cursor only."""
        db = phoenixdb.connect(TEST_DB_URL, autocommit=True)
        self.addCleanup(db.close)

        with db.cursor(cursor_factory=phoenixdb.cursor.DictCursor) as cursor:
            self._check_dict_cursor(cursor)

    def test_schema(self):
        """Tables can be created and queried inside an explicit schema."""
        db = phoenixdb.connect(TEST_DB_URL, autocommit=True)
        self.addCleanup(db.close)

        with db.cursor() as cursor:
            try:
                cursor.execute("CREATE SCHEMA IF NOT EXISTS test_schema")
            except InternalError as e:
                # Namespace mapping is disabled on some servers; skip rather than fail.
                if "phoenix.schema.isNamespaceMappingEnabled" in e.message:
                    self.skipTest(e.message)
                raise

            cursor.execute("DROP TABLE IF EXISTS test_schema.test")
            cursor.execute("CREATE TABLE test_schema.test (id INTEGER PRIMARY KEY, text VARCHAR)")
            cursor.execute("UPSERT INTO test_schema.test VALUES (?, ?)", [1, 'text 1'])
            cursor.execute("SELECT * FROM test_schema.test ORDER BY id")
            self.assertEqual(cursor.fetchall(), [[1, 'text 1']])
85 |
--------------------------------------------------------------------------------
/phoenixdb/tests/test_dbapi20.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | import phoenixdb
3 | from . import dbapi20
4 | from phoenixdb.tests import TEST_DB_URL
5 |
6 |
@unittest.skipIf(TEST_DB_URL is None, "these tests require the PHOENIXDB_TEST_DB_URL environment variable set to a clean database")
class PhoenixDatabaseAPI20Test(dbapi20.DatabaseAPI20Test):
    """PEP 249 (DB API 2.0) compliance suite, adapted for Phoenix.

    Phoenix differences from the stock suite: every table needs a primary key,
    the insert verb is UPSERT, and nextset/setoutputsize are not supported.
    """

    driver = phoenixdb
    connect_args = (TEST_DB_URL, )

    # Phoenix requires a primary key on every table, so the stock DDL is overridden.
    ddl1 = 'create table %sbooze (name varchar(20) primary key)' % dbapi20.DatabaseAPI20Test.table_prefix
    ddl2 = 'create table %sbarflys (name varchar(20) primary key, drink varchar(30))' % dbapi20.DatabaseAPI20Test.table_prefix
    insert = 'upsert'

    def test_nextset(self):
        # Multiple result sets are not supported by Phoenix.
        pass

    def test_setoutputsize(self):
        # setoutputsize() is a no-op in this driver.
        pass

    def _connect(self):
        # The base suite's DDL statements expect autocommit mode.
        con = dbapi20.DatabaseAPI20Test._connect(self)
        con.autocommit = True
        return con

    def test_None(self):
        """A NULL column value must come back as Python None."""
        con = self._connect()
        try:
            cur = con.cursor()
            self.executeDDL2(cur)
            cur.execute("%s into %sbarflys values ('a', NULL)" % (self.insert, self.table_prefix))
            cur.execute('select drink from %sbarflys' % self.table_prefix)
            r = cur.fetchall()
            self.assertEqual(len(r), 1)
            self.assertEqual(len(r[0]), 1)
            self.assertEqual(r[0][0], None, 'NULL value not returned as None')
        finally:
            con.close()

    def test_autocommit(self):
        """The autocommit flag is readable and writable on the connection."""
        con = dbapi20.DatabaseAPI20Test._connect(self)
        self.assertFalse(con.autocommit)
        con.autocommit = True
        self.assertTrue(con.autocommit)
        con.autocommit = False
        self.assertFalse(con.autocommit)
        con.close()

    def test_readonly(self):
        """The readonly flag is readable and writable on the connection."""
        con = dbapi20.DatabaseAPI20Test._connect(self)
        self.assertFalse(con.readonly)
        con.readonly = True
        self.assertTrue(con.readonly)
        con.readonly = False
        self.assertFalse(con.readonly)
        con.close()

    def test_iter(self):
        # https://www.python.org/dev/peps/pep-0249/#iter
        con = self._connect()
        try:
            cur = con.cursor()
            if hasattr(cur, '__iter__'):
                self.assertIs(cur, iter(cur))
        finally:
            con.close()

    def test_next(self):
        # https://www.python.org/dev/peps/pep-0249/#next
        con = self._connect()
        try:
            cur = con.cursor()
            if not hasattr(cur, 'next'):
                return

            # cursor.next should raise an Error if called before
            # executing a select-type query
            self.assertRaises(self.driver.Error, cur.next)

            # cursor.next should raise an Error if called after
            # executing a query that cannot return rows
            self.executeDDL1(cur)
            self.assertRaises(self.driver.Error, cur.next)

            # cursor.next should raise StopIteration if a query
            # retrieves no rows
            cur.execute('select name from %sbooze' % self.table_prefix)
            self.assertRaises(StopIteration, cur.next)
            # failUnless was removed in Python 3.12; assertIn is the modern equivalent.
            self.assertIn(cur.rowcount, (-1, 0))

            # cursor.next should raise an Error if called after
            # executing a query that cannot return rows
            cur.execute("%s into %sbooze values ('Victoria Bitter')" % (
                self.insert, self.table_prefix
            ))
            self.assertRaises(self.driver.Error, cur.next)

            cur.execute('select name from %sbooze' % self.table_prefix)
            r = cur.next()
            self.assertEqual(len(r), 1, 'cursor.next should have retrieved a row with one column')
            self.assertEqual(r[0], 'Victoria Bitter', 'cursor.next retrieved incorrect data')
            # cursor.next should raise StopIteration if no more rows available
            self.assertRaises(StopIteration, cur.next)
            self.assertIn(cur.rowcount, (-1, 1))
        finally:
            con.close()
108 |
--------------------------------------------------------------------------------
/phoenixdb/tests/test_errors.py:
--------------------------------------------------------------------------------
1 | from phoenixdb.tests import DatabaseTestCase
2 |
3 |
class ProgrammingErrorTest(DatabaseTestCase):
    """Syntactically invalid SQL must surface as ProgrammingError with details."""

    def test_invalid_sql(self):
        with self.conn.cursor() as cur:
            with self.assertRaises(self.conn.ProgrammingError) as ctx:
                cur.execute("UPS")
            err = ctx.exception
            self.assertEqual("Syntax error. Encountered \"UPS\" at line 1, column 1.", err.message)
            self.assertEqual(601, err.code)
            self.assertEqual("42P00", err.sqlstate)
13 |
14 |
class IntegrityErrorTest(DatabaseTestCase):
    """Constraint violations must surface as IntegrityError."""

    def test_null_in_pk(self):
        self.createTable("phoenixdb_test_tbl1", "id integer primary key")
        with self.conn.cursor() as cur:
            with self.assertRaises(self.conn.IntegrityError) as ctx:
                cur.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (NULL)")
            err = ctx.exception
            self.assertEqual("Constraint violation. PHOENIXDB_TEST_TBL1.ID may not be null", err.message)
            self.assertEqual(218, err.code)
            # Different server versions report either SQLSTATE.
            self.assertIn(err.sqlstate, ("22018", "23018"))
25 |
26 |
class DataErrorTest(DatabaseTestCase):
    """Value-level problems (out-of-range, division by zero) must raise DataError."""

    def test_number_outside_of_range(self):
        self.createTable("phoenixdb_test_tbl1", "id tinyint primary key")
        with self.conn.cursor() as cur:
            with self.assertRaises(self.conn.DataError) as ctx:
                cur.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (10000)")
            err = ctx.exception
            self.assertEqual("Type mismatch. TINYINT and INTEGER for 10000", err.message)
            self.assertEqual(203, err.code)
            self.assertEqual("22005", err.sqlstate)

    def test_division_by_zero(self):
        self.createTable("phoenixdb_test_tbl1", "id integer primary key")
        with self.conn.cursor() as cur:
            with self.assertRaises(self.conn.DataError) as ctx:
                cur.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2/0)")
            err = ctx.exception
            self.assertEqual("Divide by zero.", err.message)
            self.assertEqual(202, err.code)
            self.assertEqual("22012", err.sqlstate)
46 |
--------------------------------------------------------------------------------
/phoenixdb/tests/test_types.py:
--------------------------------------------------------------------------------
1 | import sys
2 | import unittest
3 | import datetime
4 | import phoenixdb
5 | from decimal import Decimal
6 | from phoenixdb.tests import DatabaseTestCase
7 |
8 |
class TypesTest(DatabaseTestCase):
    """Round-trip tests for the Phoenix column types supported by the driver.

    Each test upserts values both as SQL literals and as bound parameters,
    reads them back, and checks the returned Python values and (where
    applicable) the DB API 2.0 type_code reported in cursor.description.
    """

    def checkIntType(self, type_name, min_value, max_value):
        # Round-trip an integer column type: literals, bound parameters,
        # NULLs, and both range limits; out-of-range literals must be rejected.
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val {}".format(type_name))
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, 1)")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", [1])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [None])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, ?)", [min_value])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (6, ?)", [max_value])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.description[1].type_code, phoenixdb.NUMBER)
            self.assertEqual(cursor.fetchall(), [[1, 1], [2, None], [3, 1], [4, None], [5, min_value], [6, max_value]])

            # Literal values just outside the range must be rejected by the server.
            self.assertRaises(
                self.conn.DatabaseError, cursor.execute,
                "UPSERT INTO phoenixdb_test_tbl1 VALUES (100, {})".format(min_value - 1))

            self.assertRaises(
                self.conn.DatabaseError, cursor.execute,
                "UPSERT INTO phoenixdb_test_tbl1 VALUES (100, {})".format(max_value + 1))

        # XXX The server silently truncates the values
        # self.assertRaises(self.conn.DatabaseError, cursor.execute, "UPSERT INTO phoenixdb_test_tbl1 VALUES (100, ?)", [min_value - 1])
        # self.assertRaises(self.conn.DatabaseError, cursor.execute, "UPSERT INTO phoenixdb_test_tbl1 VALUES (100, ?)", [max_value + 1])

    def test_integer(self):
        self.checkIntType("integer", -2147483648, 2147483647)

    def test_unsigned_int(self):
        self.checkIntType("unsigned_int", 0, 2147483647)

    def test_bigint(self):
        self.checkIntType("bigint", -9223372036854775808, 9223372036854775807)

    def test_unsigned_long(self):
        self.checkIntType("unsigned_long", 0, 9223372036854775807)

    def test_tinyint(self):
        self.checkIntType("tinyint", -128, 127)

    def test_unsigned_tinyint(self):
        self.checkIntType("unsigned_tinyint", 0, 127)

    def test_smallint(self):
        self.checkIntType("smallint", -32768, 32767)

    def test_unsigned_smallint(self):
        self.checkIntType("unsigned_smallint", 0, 32767)

    def checkFloatType(self, type_name, min_value, max_value):
        # Round-trip a floating-point column type. The range limits are
        # compared with assertAlmostEqual to tolerate representation error.
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val {}".format(type_name))
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, 1)")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", [1])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [None])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, ?)", [min_value])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (6, ?)", [max_value])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.description[1].type_code, phoenixdb.NUMBER)
            rows = cursor.fetchall()
            self.assertEqual([r[0] for r in rows], [1, 2, 3, 4, 5, 6])
            self.assertEqual(rows[0][1], 1.0)
            self.assertEqual(rows[1][1], None)
            self.assertEqual(rows[2][1], 1.0)
            self.assertEqual(rows[3][1], None)
            self.assertAlmostEqual(rows[4][1], min_value)
            self.assertAlmostEqual(rows[5][1], max_value)

    def test_float(self):
        self.checkFloatType("float", -3.4028234663852886e+38, 3.4028234663852886e+38)

    def test_unsigned_float(self):
        self.checkFloatType("unsigned_float", 0, 3.4028234663852886e+38)

    def test_double(self):
        self.checkFloatType("double", -1.7976931348623158E+308, 1.7976931348623158E+308)

    def test_unsigned_double(self):
        self.checkFloatType("unsigned_double", 0, 1.7976931348623158E+308)

    def test_decimal(self):
        """DECIMAL values round-trip as decimal.Decimal; overflow raises."""
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val decimal(8,3)")
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, 33333.333)")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", [33333.333])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [Decimal('33333.333')])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, ?)", [None])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.description[1].type_code, phoenixdb.NUMBER)
            rows = cursor.fetchall()
            self.assertEqual([r[0] for r in rows], [1, 2, 3, 4, 5])
            self.assertEqual(rows[0][1], Decimal('33333.333'))
            self.assertEqual(rows[1][1], None)
            self.assertEqual(rows[2][1], Decimal('33333.333'))
            self.assertEqual(rows[3][1], Decimal('33333.333'))
            self.assertEqual(rows[4][1], None)
            # Values exceeding the declared precision/scale must be rejected.
            self.assertRaises(
                self.conn.DatabaseError, cursor.execute,
                "UPSERT INTO phoenixdb_test_tbl1 VALUES (100, ?)", [Decimal('1234567890')])
            self.assertRaises(
                self.conn.DatabaseError, cursor.execute,
                "UPSERT INTO phoenixdb_test_tbl1 VALUES (101, ?)", [Decimal('123456.789')])

    def test_boolean(self):
        """BOOLEAN values round-trip and report the phoenixdb.BOOLEAN type_code."""
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val boolean")
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, TRUE)")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, FALSE)")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, NULL)")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [True])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, ?)", [False])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (6, ?)", [None])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.description[1].type_code, phoenixdb.BOOLEAN)
            self.assertEqual(cursor.fetchall(), [[1, True], [2, False], [3, None], [4, True], [5, False], [6, None]])

    def test_time(self):
        """TIME values round-trip as datetime.time (whole seconds only)."""
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val time")
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, '1970-01-01 12:01:02')")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", [phoenixdb.Time(12, 1, 2)])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [datetime.time(12, 1, 2)])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, ?)", [None])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.fetchall(), [
                [1, datetime.time(12, 1, 2)],
                [2, None],
                [3, datetime.time(12, 1, 2)],
                [4, datetime.time(12, 1, 2)],
                [5, None],
            ])

    @unittest.skip("https://issues.apache.org/jira/browse/CALCITE-797")
    def test_time_full(self):
        # Sub-second TIME precision; blocked on the linked Calcite issue.
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val time")
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, '2015-07-12 13:01:02.123')")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, ?)", [datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.fetchall(), [
                [1, datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)],
                [2, datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)],
            ])

    def test_date(self):
        """DATE values round-trip as datetime.date."""
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val date")
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, '2015-07-12 00:00:00')")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", [phoenixdb.Date(2015, 7, 12)])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [datetime.date(2015, 7, 12)])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.fetchall(), [
                [1, datetime.date(2015, 7, 12)],
                [3, datetime.date(2015, 7, 12)],
                [4, datetime.date(2015, 7, 12)],
            ])

    @unittest.skip("https://issues.apache.org/jira/browse/CALCITE-798")
    def test_date_full(self):
        # DATE with a time component; blocked on the linked Calcite issue.
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val date")
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, '2015-07-12 13:01:02.123')")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, ?)", [datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.fetchall(), [
                [1, datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)],
                [2, datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)],
            ])

    def test_date_null(self):
        """NULL DATE values come back as None."""
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val date")
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, NULL)")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, ?)", [None])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")  # raises NullPointerException on the server
            self.assertEqual(cursor.fetchall(), [
                [1, None],
                [2, None],
            ])

    def test_timestamp(self):
        """TIMESTAMP values round-trip as datetime.datetime with millisecond precision."""
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val timestamp")
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, '2015-07-12 13:01:02.123')")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", [phoenixdb.Timestamp(2015, 7, 12, 13, 1, 2)])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, ?)", [None])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.fetchall(), [
                [1, datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)],
                [2, None],
                [3, datetime.datetime(2015, 7, 12, 13, 1, 2)],
                [4, datetime.datetime(2015, 7, 12, 13, 1, 2, 123000)],
                [5, None],
            ])

    @unittest.skip("https://issues.apache.org/jira/browse/CALCITE-796")
    def test_timestamp_full(self):
        # Nanosecond TIMESTAMP precision; blocked on the linked Calcite issue.
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val timestamp")
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, '2015-07-12 13:01:02.123456789')")
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.fetchall(), [
                [1, datetime.datetime(2015, 7, 12, 13, 1, 2, 123456789)],
            ])

    def test_varchar(self):
        """VARCHAR round-trips; note Phoenix stores empty strings as NULL."""
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val varchar")
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, 'abc')")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", ['abc'])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [None])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, '')")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (6, ?)", [''])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            # '' comes back as None: Phoenix treats empty strings as NULL.
            self.assertEqual(cursor.fetchall(), [[1, 'abc'], [2, None], [3, 'abc'], [4, None], [5, None], [6, None]])

    def test_varchar_very_long(self):
        """A 10,000-character VARCHAR value survives the round trip intact."""
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val varchar")
        with self.conn.cursor() as cursor:
            value = '1234567890' * 1000
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, ?)", [value])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.fetchall(), [[1, value]])

    def test_varchar_limited(self):
        """VARCHAR(n) enforces its length limit; overlong literals raise DataError."""
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val varchar(2)")
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, 'ab')")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, ?)", ['ab'])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [None])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, '')")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (6, ?)", [''])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.fetchall(), [[1, 'ab'], [2, None], [3, 'ab'], [4, None], [5, None], [6, None]])
            self.assertRaises(self.conn.DataError, cursor.execute, "UPSERT INTO phoenixdb_test_tbl1 VALUES (100, 'abc')")

    def test_char_null(self):
        """NULL and empty-string CHAR values both come back as None."""
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val char(2)")
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, NULL)")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [None])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (5, '')")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (6, ?)", [''])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.fetchall(), [[2, None], [4, None], [5, None], [6, None]])
            self.assertRaises(self.conn.DataError, cursor.execute, "UPSERT INTO phoenixdb_test_tbl1 VALUES (100, 'abc')")

    def test_char(self):
        """CHAR(n) round-trips; values are not padded, overlong ones raise."""
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val char(2)")
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, 'ab')")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, ?)", ['ab'])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, 'a')")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", ['b'])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.fetchall(), [[1, 'ab'], [2, 'ab'], [3, 'a'], [4, 'b']])
            self.assertRaises(self.conn.DataError, cursor.execute, "UPSERT INTO phoenixdb_test_tbl1 VALUES (100, 'abc')")

    def test_binary(self):
        """BINARY(n) values round-trip as bytes."""
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val binary(2)")
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, 'ab')")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, ?)", [phoenixdb.Binary(b'ab')])
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (3, '\x01\x00')")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (4, ?)", [phoenixdb.Binary(b'\x01\x00')])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.fetchall(), [
                [1, b'ab'],
                [2, b'ab'],
                [3, b'\x01\x00'],
                [4, b'\x01\x00'],
            ])

    def test_binary_all_bytes(self):
        """Every possible byte value 0x00-0xFF survives a BINARY round trip."""
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val binary(256)")
        with self.conn.cursor() as cursor:
            # Python 2 has no bytes(range(...)); build the 256-byte string manually.
            if sys.version_info[0] < 3:
                value = ''.join(map(chr, range(256)))
            else:
                value = bytes(range(256))
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, ?)", [phoenixdb.Binary(value)])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.fetchall(), [[1, value]])

    @unittest.skip("https://issues.apache.org/jira/browse/CALCITE-1050 https://issues.apache.org/jira/browse/PHOENIX-2585")
    def test_array(self):
        # ARRAY columns; blocked on the linked Calcite/Phoenix issues.
        self.createTable("phoenixdb_test_tbl1", "id integer primary key, val integer[]")
        with self.conn.cursor() as cursor:
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (1, ARRAY[1, 2])")
            cursor.execute("UPSERT INTO phoenixdb_test_tbl1 VALUES (2, ?)", [[2, 3]])
            cursor.execute("SELECT id, val FROM phoenixdb_test_tbl1 ORDER BY id")
            self.assertEqual(cursor.fetchall(), [
                [1, [1, 2]],
                [2, [2, 3]],
            ])
313 |
--------------------------------------------------------------------------------
/phoenixdb/types.py:
--------------------------------------------------------------------------------
1 | # Copyright 2015 Lukas Lalinsky
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License");
4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS,
11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | # See the License for the specific language governing permissions and
13 | # limitations under the License.
14 |
15 | import sys
16 | import time
17 | import datetime
18 | from decimal import Decimal
19 | from phoenixdb.avatica.proto import common_pb2
20 |
# Public API of this module: DB API 2.0 constructors and type objects, plus
# the Java class conversion tables used by the cursor implementation.
__all__ = [
    'Date', 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks',
    'Binary', 'STRING', 'BINARY', 'NUMBER', 'DATETIME', 'ROWID', 'BOOLEAN',
    'JAVA_CLASSES', 'JAVA_CLASSES_MAP', 'TypeHelper',
]
26 |
27 |
def Date(year, month, day):
    """Construct an object holding a date value (DB API 2.0 constructor)."""
    return datetime.date(year=year, month=month, day=day)
31 |
32 |
def Time(hour, minute, second):
    """Construct an object holding a time-of-day value (DB API 2.0 constructor)."""
    return datetime.time(hour=hour, minute=minute, second=second)
36 |
37 |
def Timestamp(year, month, day, hour, minute, second):
    """Construct an object holding a datetime/timestamp value (DB API 2.0 constructor)."""
    return datetime.datetime(
        year=year, month=month, day=day,
        hour=hour, minute=minute, second=second)
41 |
42 |
def DateFromTicks(ticks):
    """Construct a date value from the given UNIX timestamp (local time)."""
    year, month, day = time.localtime(ticks)[:3]
    return Date(year, month, day)
46 |
47 |
def TimeFromTicks(ticks):
    """Construct a time value from the given UNIX timestamp (local time)."""
    hour, minute, second = time.localtime(ticks)[3:6]
    return Time(hour, minute, second)
51 |
52 |
def TimestampFromTicks(ticks):
    """Construct a datetime/timestamp value from the given UNIX timestamp (local time)."""
    fields = time.localtime(ticks)[:6]
    return Timestamp(*fields)
56 |
57 |
def Binary(value):
    """Wrap *value* in an object suitable for a binary (long) string column."""
    buf = bytes(value)
    return buf
61 |
62 |
def time_from_java_sql_time(n):
    """Convert milliseconds since midnight (java.sql.Time) into datetime.time."""
    midnight = datetime.datetime(1970, 1, 1)
    return (midnight + datetime.timedelta(milliseconds=n)).time()
66 |
67 |
def time_to_java_sql_time(t):
    """Convert datetime.time into milliseconds since midnight (java.sql.Time)."""
    whole_seconds = t.hour * 3600 + t.minute * 60 + t.second
    return whole_seconds * 1000 + t.microsecond // 1000
70 |
71 |
def date_from_java_sql_date(n):
    """Convert days since the epoch (java.sql.Date) into datetime.date."""
    epoch = datetime.date(1970, 1, 1)
    return epoch + datetime.timedelta(days=n)
74 |
75 |
def date_to_java_sql_date(d):
    """Convert datetime.date (or datetime.datetime) into days since the epoch."""
    # A datetime is truncated to its date component first.
    day = d.date() if isinstance(d, datetime.datetime) else d
    return (day - datetime.date(1970, 1, 1)).days
81 |
82 |
def datetime_from_java_sql_timestamp(n):
    """Convert milliseconds since the epoch (java.sql.Timestamp) into datetime."""
    epoch = datetime.datetime(1970, 1, 1)
    return epoch + datetime.timedelta(milliseconds=n)
85 |
86 |
def datetime_to_java_sql_timestamp(d):
    """Convert datetime into milliseconds since the epoch (java.sql.Timestamp)."""
    # Integer arithmetic on the timedelta components avoids float rounding.
    delta = d - datetime.datetime(1970, 1, 1)
    millis = (delta.days * 86400 + delta.seconds) * 1000
    return millis + delta.microseconds // 1000
90 |
91 |
class ColumnType(object):
    """DB API 2.0 type object that compares equal to a group of type names.

    Per PEP 249 "Type Objects", cursor.description type codes can be compared
    against module-level objects like STRING or NUMBER using ``==``.
    """

    def __init__(self, eq_types):
        # Keep both a tuple (used for ordering in __cmp__) and a set
        # (for O(1) membership in __eq__).
        self.eq_types = tuple(eq_types)
        self.eq_types_set = set(eq_types)

    def __eq__(self, other):
        # Equal to any of the grouped type names, e.g. NUMBER == 'INTEGER'.
        return other in self.eq_types_set

    def __cmp__(self, other):
        # NOTE(review): __cmp__ is only consulted by Python 2; Python 3
        # ignores it entirely. The `other < self.eq_types` branch compares a
        # value against a tuple, which relies on Python 2's arbitrary
        # cross-type ordering — presumably intentional for legacy sorting.
        if other in self.eq_types_set:
            return 0
        if other < self.eq_types:
            return 1
        else:
            return -1
108 |
109 |
# DB API 2.0 type objects (PEP 249): each compares equal to the Phoenix type
# names it groups, for use against cursor.description type codes.
STRING = ColumnType(['VARCHAR', 'CHAR'])
"""Type object that can be used to describe string-based columns."""

BINARY = ColumnType(['BINARY', 'VARBINARY'])
"""Type object that can be used to describe (long) binary columns."""

NUMBER = ColumnType([
    'INTEGER', 'UNSIGNED_INT', 'BIGINT', 'UNSIGNED_LONG', 'TINYINT', 'UNSIGNED_TINYINT',
    'SMALLINT', 'UNSIGNED_SMALLINT', 'FLOAT', 'UNSIGNED_FLOAT', 'DOUBLE', 'UNSIGNED_DOUBLE', 'DECIMAL'
])
"""Type object that can be used to describe numeric columns."""

DATETIME = ColumnType(['TIME', 'DATE', 'TIMESTAMP', 'UNSIGNED_TIME', 'UNSIGNED_DATE', 'UNSIGNED_TIMESTAMP'])
"""Type object that can be used to describe date/time columns."""

ROWID = ColumnType([])
"""Only implemented for DB API 2.0 compatibility, not used."""

BOOLEAN = ColumnType(['BOOLEAN'])
"""Type object that can be used to describe boolean columns. This is a phoenixdb-specific extension."""
130 |
131 |
132 | # XXX ARRAY
133 |
# Widest built-in integer type, bridging Python 2 (long) and Python 3 (int).
if sys.version_info[0] < 3:
    _long = long  # noqa: F821
else:
    _long = int
138 |
# Each entry is (java_class, rep, mutate_to, cast_from): ``mutate_to``
# converts a Python value to the wire value, ``cast_from`` converts a wire
# value back to Python; None means the value is used as-is.
JAVA_CLASSES = {
    'bool_value': [
        ('java.lang.Boolean', common_pb2.BOOLEAN, None, None),
    ],
    'string_value': [
        ('java.lang.Character', common_pb2.CHARACTER, None, None),
        ('java.lang.String', common_pb2.STRING, None, None),
        ('java.math.BigDecimal', common_pb2.BIG_DECIMAL, str, Decimal),
    ],
    'number_value': [
        ('java.lang.Integer', common_pb2.INTEGER, None, int),
        ('java.lang.Short', common_pb2.SHORT, None, int),
        ('java.lang.Long', common_pb2.LONG, None, _long),
        ('java.lang.Byte', common_pb2.BYTE, None, int),
        ('java.sql.Time', common_pb2.JAVA_SQL_TIME, time_to_java_sql_time, time_from_java_sql_time),
        ('java.sql.Date', common_pb2.JAVA_SQL_DATE, date_to_java_sql_date, date_from_java_sql_date),
        ('java.sql.Timestamp', common_pb2.JAVA_SQL_TIMESTAMP, datetime_to_java_sql_timestamp, datetime_from_java_sql_timestamp),
    ],
    'bytes_value': [
        ('[B', common_pb2.BYTE_STRING, Binary, None),
    ],
    'double_value': [
        # if common_pb2.FLOAT is used, incorrect values are sent
        ('java.lang.Float', common_pb2.DOUBLE, float, float),
        ('java.lang.Double', common_pb2.DOUBLE, float, float),
    ]
}
"""Groups of Java classes, keyed by the ``common_pb2.TypedValue`` field they use."""
167 |
# Built with tuple unpacking instead of v[0]..v[3] indexing for readability;
# dict comprehensions are available on all supported interpreters (2.7+).
JAVA_CLASSES_MAP = {
    klass: (field_name, rep, mutate_to, cast_from)
    for field_name, entries in JAVA_CLASSES.items()
    for klass, rep, mutate_to, cast_from in entries
}
"""Flips :data:`JAVA_CLASSES` to allow for faster lookup by Java class name.

The mapping is structured as::

    {
        'java.math.BigDecimal': ('string_value', common_pb2.BIG_DECIMAL, str, Decimal),
        ...
    }
"""
178 |
179 |
class TypeHelper(object):

    @staticmethod
    def from_class(klass):
        """Retrieves a Rep and functions to cast to/from based on the Java class.

        :param klass:
            The string of the Java class for the column or parameter.

        :returns: tuple ``(field_name, rep, mutate_to, cast_from)``
            WHERE
            ``field_name`` is the attribute in ``common_pb2.TypedValue``
            ``rep`` is the common_pb2.Rep enum
            ``mutate_to`` is the function to cast values into Phoenix values, if any
            ``cast_from`` is the function to cast from the Phoenix value to the Python value, if any

        :raises:
            NotImplementedError
        """
        # EAFP: a single lookup, raising only for unknown classes.
        try:
            return JAVA_CLASSES_MAP[klass]
        except KeyError:
            raise NotImplementedError('type {} is not supported'.format(klass))
202 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | -e git+https://bitbucket.org/lalinsky/python-sqlline.git#egg=sqlline
2 | nose
3 | protobuf>=3.0.0
4 | sphinx
5 | flake8
6 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [nosetests]
2 | verbosity=2
3 | testmatch=^test_.+
4 | where=phoenixdb/tests
5 |
6 | [build_sphinx]
7 | source-dir = doc
8 | build-dir = doc/build
9 | all_files = 1
10 |
11 | [upload_sphinx]
12 | upload-dir = doc/build/html
13 |
14 | [flake8]
15 | max-line-length = 140
16 | exclude =
17 | e,e3,env,venv,doc,build,dist,.tox,.idea,
18 | ./phoenixdb/tests/dbapi20.py,
19 | ./phoenixdb/avatica/proto/*_pb2.py
20 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import setup, find_packages
2 |
# Register the Sphinx documentation command only when Sphinx is installed;
# without it, "setup.py build_sphinx" is simply unavailable.
cmdclass = {}

try:
    from sphinx.setup_command import BuildDoc
except ImportError:
    pass
else:
    cmdclass['build_sphinx'] = BuildDoc
10 |
11 |
def readme():
    """Return the contents of README.rst for use as the long description.

    The file is decoded explicitly as UTF-8 so the build does not depend on
    the locale's default encoding, and behaves the same on Python 2 and 3.
    """
    import io  # local import keeps the module top level unchanged
    with io.open('README.rst', encoding='utf-8') as f:
        return f.read()
15 |
16 |
# Single source of truth for the release version; also forwarded to the
# Sphinx build through command_options below.
version = "0.7"

setup(
    name="phoenixdb",
    version=version,
    description="Phoenix database adapter for Python",
    long_description=readme(),
    author="Lukas Lalinsky",
    author_email="lukas@oxygene.sk",
    url="https://bitbucket.org/lalinsky/python-phoenixdb",
    license="Apache 2",
    packages=find_packages(),
    include_package_data=True,
    cmdclass=cmdclass,
    # Keep the Sphinx version/release strings in sync with the package version.
    command_options={
        'build_sphinx': {
            'version': ('setup.py', version),
            'release': ('setup.py', version),
        },
    },
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    # Runtime dependency of the generated Avatica bindings
    # (phoenixdb/avatica/proto/*_pb2.py).
    install_requires=[
        'protobuf>=3.0.0',
    ]
)
50 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = py27,py35,py36
3 |
4 | [testenv]
5 | passenv = PHOENIXDB_TEST_DB_URL
6 | commands =
7 | flake8
8 | nosetests -v
9 | deps = -rrequirements.txt
10 |
--------------------------------------------------------------------------------