├── .arcconfig ├── .devcontainer ├── Dockerfile ├── devcontainer.json └── docker-compose.yml ├── .flake8 ├── .github └── workflows │ └── all.yml ├── .gitignore ├── CHANGES.txt ├── LICENSE.txt ├── MANIFEST.in ├── Makefile ├── README.md ├── examples ├── basic.py ├── multi_threaded_inserts.py └── network_testing.py ├── memsql ├── __init__.py └── common │ ├── __init__.py │ ├── connection_pool.py │ ├── conversions.py │ ├── database.py │ ├── errorcodes.py │ ├── exceptions.py │ ├── json.py │ ├── query_builder.py │ ├── random_aggregator_pool.py │ ├── test │ ├── __init__.py │ ├── conftest.py │ ├── test_connection_pool.py │ ├── test_database_adapters.py │ ├── test_query_builder.py │ ├── test_select_result.py │ └── thread_monitor.py │ └── util.py └── setup.py /.arcconfig: -------------------------------------------------------------------------------- 1 | { 2 | "project_id": "memsql-python", 3 | "conduit_uri" : "https:\/\/grizzly.internal.memcompute.com\/api\/" 4 | } 5 | -------------------------------------------------------------------------------- /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | # [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3.9, 3.8, 3.7, 3.6, 3-bullseye, 3.10-bullseye, 3.9-bullseye, 3.8-bullseye, 3.7-bullseye, 3.6-bullseye, 3-buster, 3.10-buster, 3.9-buster, 3.8-buster, 3.7-buster, 3.6-buster 2 | ARG VARIANT=3-bullseye 3 | FROM mcr.microsoft.com/vscode/devcontainers/python:0-${VARIANT} 4 | 5 | ENV PYTHONUNBUFFERED 1 6 | 7 | # [Choice] Node.js version: none, lts/*, 16, 14, 12, 10 8 | ARG NODE_VERSION="none" 9 | RUN if [ "${NODE_VERSION}" != "none" ]; then su vscode -c "umask 0002 && . /usr/local/share/nvm/nvm.sh && nvm install ${NODE_VERSION} 2>&1"; fi 10 | 11 | # [Optional] If your requirements rarely change, uncomment this section to add them to the image. 12 | # COPY requirements.txt /tmp/pip-tmp/ 13 | # RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \ 14 | # && rm -rf /tmp/pip-tmp 15 | 16 | # [Optional] Uncomment this section to install additional OS packages. 17 | RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ 18 | && apt-get -y install --no-install-recommends mariadb-client 19 | 20 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | // For format details, see https://aka.ms/devcontainer.json. For config options, see the README at: 2 | // https://github.com/microsoft/vscode-dev-containers/tree/v0.224.3/containers/python-3-postgres 3 | // Update the VARIANT arg in docker-compose.yml to pick a Python version 4 | { 5 | "name": "Python 3 & PostgreSQL", 6 | "dockerComposeFile": "docker-compose.yml", 7 | "service": "app", 8 | "workspaceFolder": "/workspace", 9 | 10 | // Set *default* container specific settings.json values on container create. 
11 | "settings": { 12 | "python.defaultInterpreterPath": "/usr/local/bin/python", 13 | "python.linting.enabled": true, 14 | "python.linting.flake8Enabled": true, 15 | "python.formatting.autopep8Path": "/usr/local/py-utils/bin/autopep8", 16 | "python.formatting.blackPath": "/usr/local/py-utils/bin/black", 17 | "python.formatting.yapfPath": "/usr/local/py-utils/bin/yapf", 18 | "python.linting.banditPath": "/usr/local/py-utils/bin/bandit", 19 | "python.linting.flake8Path": "/usr/local/py-utils/bin/flake8", 20 | "python.linting.mypyPath": "/usr/local/py-utils/bin/mypy", 21 | "python.linting.pycodestylePath": "/usr/local/py-utils/bin/pycodestyle", 22 | "python.linting.pydocstylePath": "/usr/local/py-utils/bin/pydocstyle", 23 | "python.linting.pylintPath": "/usr/local/py-utils/bin/pylint", 24 | "python.testing.pytestPath": "/usr/local/py-utils/bin/pytest" 25 | }, 26 | 27 | // Add the IDs of extensions you want installed when the container is created. 28 | "extensions": ["ms-python.python", "ms-python.vscode-pylance"], 29 | 30 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 31 | // This can be used to network with other containers or the host. 32 | // "forwardPorts": [5000, 5432], 33 | 34 | // Use 'postCreateCommand' to run commands after the container is created. 35 | "postCreateCommand": "pip install . && pip install twine pytest", 36 | 37 | // Comment out to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root. 38 | "remoteUser": "vscode" 39 | } 40 | -------------------------------------------------------------------------------- /.devcontainer/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | 3 | services: 4 | app: 5 | build: 6 | context: .. 7 | dockerfile: .devcontainer/Dockerfile 8 | args: 9 | # Update 'VARIANT' to pick a version of Python: 3, 3.10, 3.9, 3.8, 3.7, 3.6 10 | # Append -bullseye or -buster to pin to an OS version. 11 | # Use -bullseye variants on local arm64/Apple Silicon. 12 | VARIANT: 3.6-buster 13 | # Optional Node.js version to install 14 | NODE_VERSION: "none" 15 | 16 | volumes: 17 | - ..:/workspace:cached 18 | 19 | # Overrides default command so things don't shut down after the process ends. 20 | command: sleep infinity 21 | 22 | environment: 23 | - MEMSQL_PYTHON_TEST_HOST=db 24 | 25 | # Runs app on the same network as the database container, allows "forwardPorts" in devcontainer.json function. 26 | network_mode: service:db 27 | # Uncomment the next line to use a non-root user for all processes. 
28 | # user: vscode 29 | 30 | db: 31 | image: mysql:5.7.37 32 | restart: unless-stopped 33 | environment: 34 | MYSQL_ROOT_PASSWORD: mysql 35 | 36 | volumes: 37 | postgres-data: null 38 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | exclude: .git,__pycache__,*.pyc,venv,venv2.7,venv3.4,distribution,.eggs 3 | ignore: E121,E128,E201,E202,E221,E222,E241,E302,E4,E5,E305 4 | -------------------------------------------------------------------------------- /.github/workflows/all.yml: -------------------------------------------------------------------------------- 1 | name: Test 2 | 3 | on: 4 | push: 5 | pull_request: 6 | workflow_dispatch: 7 | 8 | jobs: 9 | test: 10 | runs-on: ubuntu-latest 11 | 12 | services: 13 | mysql: 14 | image: mysql:5.7.37 15 | ports: 16 | - 3306:3306 17 | env: 18 | MYSQL_ROOT_PASSWORD: mysql 19 | 20 | steps: 21 | - uses: actions/checkout@v2 22 | 23 | - name: Set up Python 24 | uses: actions/setup-python@v2 25 | with: 26 | python-version: 3.9 27 | 28 | - name: Install dependencies 29 | run: | 30 | python -m pip install --upgrade pip 31 | python -m pip install flake8 pytest mock ordereddict 32 | pip install . 33 | 34 | - name: Run tests 35 | run: | 36 | pytest memsql 37 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | 3 | # C extensions 4 | *.so 5 | 6 | # Packages 7 | *.egg 8 | *.egg-info 9 | dist 10 | build 11 | eggs 12 | parts 13 | bin 14 | var 15 | sdist 16 | develop-eggs 17 | .installed.cfg 18 | lib 19 | lib64 20 | __pycache__ 21 | .cache 22 | .eggs 23 | 24 | # Installer logs 25 | pip-log.txt 26 | 27 | /venv* 28 | .pytest_cache 29 | -------------------------------------------------------------------------------- /CHANGES.txt: -------------------------------------------------------------------------------- 1 | 2022-03-17 Version 3.2.0 2 | 3 | * Support mysqlclient > 2.1 (#20) 4 | * Fix BINARY column support (#21) 5 | 6 | 2020-04-25 Version 3.1.0 7 | 8 | * Upgrade mysqlclient to 1.4 or greater 9 | 10 | 2019-07-24 Version 3.0.0 11 | 12 | * Remove Python 2 support 13 | * Update mysqlclient dependency 14 | * Remove SQL Lock 15 | * Remove SQL Utility 16 | * Remove SQL Step Queue 17 | 18 | 2018-12-18 Version 2.19.0 19 | 20 | * Remove explicit pinning of six dependency 21 | * Relax dateutil dependency to `<3.0` 22 | * Remove Python 2.6 support 23 | 24 | 2018-07-17 Version 2.18.0 25 | 26 | * Upgrade six dependency to version 1.11.0 27 | 28 | 2016-09-08 Version 2.17.0 29 | 30 | * Add version to ConnectionPool to allow expiring of old connections after an event 31 | 32 | 2016-07-09 Version 2.16.0 33 | 34 | * Performance improvement when retrieving large numbers of rows from the database.
35 | 36 | 2015-10-08 Version 2.15.0 37 | 38 | * Upgrade to mysqlclient 1.3.6 39 | 40 | 2015-05-14 Version 2.14.5 41 | 42 | * Fixed bug with invalid connections in the ConnectionPool 43 | 44 | 2014-12-19 Version 2.14.4 45 | 46 | * Fixed bug with Unicode escaping in Python 2 47 | 48 | 2014-12-16 Version 2.14.3 49 | 50 | * Fixed bug with the arbitrary options code 51 | 52 | 2014-12-16 Version 2.14.2 53 | 54 | * Support passing arbitrary options to underlying database connection 55 | 56 | 2014-12-16 Version 2.14.1 57 | 58 | * Support unix_socket param to database connection 59 | 60 | 2014-10-01 Version 2.14.0 61 | 62 | * Python 3 support 63 | 64 | 2014-06-27 Version 2.13.1 65 | 66 | * Expose thread_id method on database adapter 67 | 68 | 2014-06-09 Version 2.13.0 69 | 70 | * Move to DATETIME and client side UTC timestamps 71 | * Requires a full table migration for anything using sql_step_queue or sql_lock 72 | 73 | 2014-06-06 Version 2.12.2 74 | 75 | * Refactor SQLStepQueue into multiple files - still backwards compat from an import perspective 76 | * Allow user to pass in different TaskHandler class 77 | 78 | 2014-06-05 Version 2.12.1 79 | 80 | * When refreshing a SQLStepQueue task we need to parse step start/stop times as datetimes 81 | 82 | 2014-06-04 Version 2.12.0 83 | 84 | * Added update_count column to SQLStepQueue 85 | * this ensures that every update causes a return value of 1 changed row, so long as there is at least one row to change. 86 | * Return the task id from enqueue 87 | 88 | 2014-06-02 Version 2.11.2 89 | 90 | * Fixed bug in SQLStepQueue 91 | 92 | 2014-05-23 Version 2.11.1 93 | 94 | * Fixed bug in SQLStepQueue 95 | 96 | 2014-03-26 Version 2.11.0 97 | 98 | * Improved performance of the Row class 99 | * Semi-mutable 100 | * Mostly backwards compat, but certain delete key operations will fail 101 | 102 | 2014-03-12 Version 2.10.4 103 | 104 | * Added query_builder module to help write complex SQL queries. 105 | 106 | 2014-03-09 Version 2.10.3 107 | 108 | * Re-enabled _ensure_connected 109 | * Added select_db method to database connections 110 | 111 | 2014-03-09 Version 2.10.2 112 | 113 | * Made it easier to modify data on a task_handler 114 | * Better Unicode support 115 | 116 | 2014-02-03 Version 2.10.1 117 | 118 | * 2.10.0 made certain queries go through the formatter even without 119 | params; this broke things like having single percents in queries. 120 | 121 | 2014-02-02 Version 2.10.0 122 | 123 | * Modified SQLStepQueue to work with MemSQL distributed 124 | * added tests 125 | * added extra_predicate (to pop/inspect different subsets of the queue) 126 | * exposed escaping in database.py 127 | * fixed some 2.6 compatibility issues 128 | * added requeue and bulk_finish to sql_step_queue 129 | 130 | 2014-01-03 Version 2.9.0 131 | 132 | * Implemented _mysql 1.2.5 compatibility 133 | * All of the query methods now take lists and tuples as 134 | arguments. They will expand into ','.join(escaped_sequence). 135 | * Public interface should be 100% backwards compatible 136 | * All of the query methods now take kwargs. If you specify kwargs, your 137 | query will require %(arg_name)s blocks as opposed to %s blocks. You 138 | can't pass both args and kwargs.
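      For example, a minimal sketch of the two calling styles (here `db` is a
      hypothetical database.Connection; a single call cannot mix both styles):

          db.query("SELECT * FROM t WHERE id IN (%s) AND name = %s", [1, 2, 3], "bob")
          db.query("SELECT * FROM t WHERE name = %(name)s", name="bob")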
139 | 140 | 2013-11-26 Version 2.8.2 141 | 142 | * Fixed a random_aggregator_pool bug 143 | 144 | 2013-11-20 Version 2.8.1 145 | 146 | * Fixed a minor 2.6 deprecation warning 147 | 148 | 2013-10-29 Version 2.8.0 149 | 150 | * Extracted collectd plugin into a separate library: memsql-collectd 151 | * https://github.com/memsql/memsql-collectd 152 | * Better make clean 153 | 154 | 2013-10-23 Version 2.7.2 155 | 156 | * Fixed Master node detection in certain situations 157 | 158 | 2013-10-18 Version 2.7.1 159 | 160 | * Tweaks to Network Testing module 161 | * Changed the API a bit to operate better in a distributed environment 162 | 163 | 2013-10-18 Version 2.7.0 164 | 165 | * Added Network Testing module 166 | * Kick off a bandwidth or latency test against a MemSQL server 167 | * Lots of little tweaks here and there 168 | * Refactored out some shared logic into sql_utility 169 | * Insert MemSQL variables into the Ops Facts system 170 | 171 | 2013-10-15 Version 2.6.0 172 | 173 | * Added a useful class called SQLLockManager 174 | * Produces SQLLocks attached to a database table 175 | * These provide the means to coordinate resource access in a distributed system 176 | * Locks time out if they haven't been pinged in a configurable expiry time 177 | * Ensure deadlocks can't happen due to parts of the distributed architecture failing 178 | 179 | 2013-10-10 Version 2.5.0 180 | 181 | * The MemSQL collectd plugin now flushes data using a thread 182 | * this helps stabilize insert timing 183 | * keeps collectd from falling behind by 40ms+ during a flush 184 | * Don't send diskinfo from the collectd plugin, the df plugin is now required for Ops 185 | 186 | 2013-10-07 Version 2.4.0 187 | 188 | * Added a useful class called SQLStepQueue 189 | * Requires MemSQL 2.5 to work due to using JSON under the hood. 190 | * Fully atomic queue that is designed to also store data forever 191 | * Each enqueued item can be started, multiple steps can be executed on it, and then it can be stopped 192 | * Timing information for each step is stored on the item's execution row 193 | 194 | 2013-10-07 Version 2.3.4 195 | 196 | * Fixed a minor exception that was raised when a node isn't a MemSQL node 197 | 198 | 2013-09-26 Version 2.3.3 199 | 200 | * Implemented garbage collection for very old pending analytical rows 201 | 202 | 2013-09-26 Version 2.3.2 203 | 204 | * Removed test package from setup.py 205 | 206 | 2013-09-25 Version 2.3.1 207 | 208 | * Minor bug fix - NaN values are no longer sent to MemSQL 209 | 210 | 2013-09-16 Version 2.3.0 211 | 212 | * Send disk space usage from our plugin for MemSQL nodes. We do 213 | this for only the disk that is used by the data directory. 214 | 215 | 2013-09-12 Version 2.2.0 216 | 217 | * Send certain MemSQL stats to the cluster as COUNTER types 218 | 219 | 2013-09-11 Version 2.1.1 220 | 221 | * Minor change to how analytics data is stored in MemSQL Ops 222 | 223 | 2013-09-09 Version 2.1.0 224 | 225 | * OperationalErrors are only raised when an actual connection issue has occurred. 226 | * All other non-fatal issues are raised as DatabaseErrors 227 | 228 | 2013-09-06 Version 2.0.0 229 | 230 | * Renamed database_mysqldb -> database 231 | * other minor changes 232 | * preparing for open source release 233 | 234 | 2013-09-04 Version 1.2.0 235 | 236 | * Lots of changes, primarily MemSQL Ops v2.2.0+ support 237 | * Auto-detect if the running collectd node is part of a MemSQL 238 | distributed cluster. If so we track show_status and insert it 239 | into the analytics table.
240 | 241 | 2013-07-25 Version 1.0.0 242 | 243 | * Initial release. 244 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2022 SingleStore (https://www.singlestore.com) 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in 13 | all copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 21 | THE SOFTWARE. 22 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.txt 2 | include *.md 3 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: 2 | rm -rf build/ dist; ./setup.py sdist 3 | 4 | upload: all 5 | python3 setup.py sdist 6 | twine upload dist/* 7 | 8 | clean: 9 | rm -rf *.egg memsql.egg-info dist build 10 | python3 setup.py clean --all 11 | for _kill_path in $$(find . -type f -name "*.pyc"); do rm -f $$_kill_path; done 12 | for _kill_path in $$(find . -name "__pycache__"); do rm -rf $$_kill_path; done 13 | 14 | test: 15 | python3 setup.py test 16 | 17 | test-watch: 18 | python3 setup.py test -w 19 | 20 | .PHONY: flake8 21 | flake8: 22 | flake8 --config=.flake8 . -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MemSQL Python Libraries 2 | 3 | This library contains various plugins and wrappers designed by MemSQL 4 | engineers for a couple of important Python libraries. 5 | 6 | ## Install 7 | 8 | ```bash 9 | pip install memsql 10 | ``` 11 | 12 | Copy and paste the following steps to get started quickly on Ubuntu: 13 | 14 | ```bash 15 | sudo apt-get update 16 | sudo apt-get install -y mysql-client python-dev libmysqlclient-dev python-pip 17 | sudo pip install memsql 18 | ``` 19 | 20 | Copy and paste the following to get 21 | started with RHEL-based distributions such as Amazon Linux or CentOS: 22 | 23 | ```bash 24 | sudo yum update 25 | sudo yum install -y gcc mysql-devel 26 | sudo pip install memsql 27 | ``` 28 | 29 | ## Testing 30 | 31 | Run tests by executing `make test`.
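## Usage

A minimal sketch of the two main entry points (host and credentials below are placeholders): `database.connect` opens a single connection, while `ConnectionPool` caches connections per (host, port, user, database, process) key.

```python
from memsql.common import database, connection_pool

# One-off connection; rows come back as dict-like Row objects.
conn = database.connect(host="127.0.0.1", port=3306, user="root", password="")
print(conn.query("SHOW DATABASES"))

# Pooled connections; closing the checked-out connection returns it to the pool.
pool = connection_pool.ConnectionPool()
with pool.connect("127.0.0.1", 3306, "root", "", "information_schema") as c:
    print(c.query("SELECT 1 AS one"))
```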
32 | -------------------------------------------------------------------------------- /examples/basic.py: -------------------------------------------------------------------------------- 1 | from memsql.common import database 2 | 3 | conn = database.connect(host="127.0.0.1", port=3306, user="root", password="") 4 | print(conn.query("show databases")) 5 | -------------------------------------------------------------------------------- /examples/multi_threaded_inserts.py: -------------------------------------------------------------------------------- 1 | import time 2 | import threading 3 | 4 | from memsql.common import database 5 | 6 | # Specify connection information for a MemSQL node 7 | HOST = "127.0.0.1" 8 | PORT = 3306 9 | USER = "root" 10 | PASSWORD = "" 11 | 12 | # Specify which database and table to work with. 13 | # Note: this database will be dropped at the end of this script 14 | DATABASE = "test" 15 | TABLE = "tbl" 16 | 17 | # The number of workers to run 18 | NUM_WORKERS = 20 19 | 20 | # Run the workload for this many seconds 21 | WORKLOAD_TIME = 10 22 | 23 | # Batch size to use 24 | BATCH_SIZE = 5000 25 | 26 | # Pre-generate the workload query 27 | QUERY_TEXT = "INSERT INTO %s VALUES %s" % ( 28 | TABLE, ",".join(["()"] * BATCH_SIZE)) 29 | 30 | def get_connection(db=DATABASE): 31 | """ Returns a new connection to the database. """ 32 | return database.connect(host=HOST, port=PORT, user=USER, password=PASSWORD, database=db) 33 | 34 | class InsertWorker(threading.Thread): 35 | """ A simple thread which inserts empty rows in a loop. """ 36 | 37 | def __init__(self, stopping): 38 | super(InsertWorker, self).__init__() 39 | self.stopping = stopping 40 | self.daemon = True 41 | self.exception = None 42 | 43 | def run(self): 44 | with get_connection() as conn: 45 | while not self.stopping.is_set(): 46 | conn.execute(QUERY_TEXT) 47 | 48 | def setup_test_db(): 49 | """ Create a database and table for this benchmark to use. """ 50 | 51 | with get_connection(db="information_schema") as conn: 52 | print('Creating database %s' % DATABASE) 53 | conn.query('CREATE DATABASE IF NOT EXISTS %s' % DATABASE) 54 | conn.query('USE %s' % DATABASE) 55 | 56 | print('Creating table %s' % TABLE) 57 | conn.query('CREATE TABLE IF NOT EXISTS tbl (id INT AUTO_INCREMENT PRIMARY KEY)') 58 | 59 | def warmup(): 60 | print('Warming up workload') 61 | with get_connection() as conn: 62 | conn.execute(QUERY_TEXT) 63 | 64 | def run_benchmark(): 65 | """ Run a set of InsertWorkers and record their performance. """ 66 | 67 | stopping = threading.Event() 68 | workers = [ InsertWorker(stopping) for _ in range(NUM_WORKERS) ] 69 | 70 | print('Launching %d workers' % NUM_WORKERS) 71 | 72 | [ worker.start() for worker in workers ] 73 | time.sleep(WORKLOAD_TIME) 74 | 75 | print('Stopping workload') 76 | 77 | stopping.set() 78 | [ worker.join() for worker in workers ] 79 | 80 | with get_connection() as conn: 81 | count = conn.get("SELECT COUNT(*) AS count FROM %s" % TABLE).count 82 | 83 | print("%d rows inserted using %d workers" % (count, NUM_WORKERS)) 84 | print("%.1f rows per second" % (count / float(WORKLOAD_TIME))) 85 | 86 | def cleanup(): 87 | """ Cleanup the database this benchmark is using. """ 88 | 89 | with get_connection() as conn: 90 | conn.query('DROP DATABASE %s' % DATABASE) 91 | 92 | if __name__ == '__main__': 93 | try: 94 | setup_test_db() 95 | warmup() 96 | run_benchmark() 97 | except KeyboardInterrupt: 98 | print("Interrupted... 
exiting...") 99 | finally: 100 | cleanup() 101 | -------------------------------------------------------------------------------- /examples/network_testing.py: -------------------------------------------------------------------------------- 1 | from memsql.common import database 2 | from memsql.perf.network_tester import NetworkTester 3 | 4 | master_agg = 'master.cs.memcompute.com' 5 | test_node = 'leaf-1.cs.memcompute.com' 6 | iterations = 100 7 | payload_size = 1024 * 500 8 | 9 | conn = database.connect(host=master_agg, user='root') 10 | conn.execute('CREATE DATABASE IF NOT EXISTS performance') 11 | conn.execute('SET GLOBAL max_allowed_packet=%d' % (1024 * 1024 * 10)) 12 | 13 | m = NetworkTester(payload_size=payload_size).connect(host=master_agg, user='root', database='performance') 14 | if m.ready(): 15 | m.destroy() 16 | m.setup() 17 | 18 | n = NetworkTester().connect(host=test_node, user='root', database='performance') 19 | 20 | def pp(data, postfix, cb=lambda x: x): 21 | for k, v in data.items(): 22 | print(k, cb(v), postfix) 23 | 24 | print('latency') 25 | pp(n.estimate_latency(), 'ms') 26 | 27 | print('\nroundtrip') 28 | pp(n.estimate_roundtrip(iterations), 'MB/s', lambda x: (x / 1024 / 1024)) 29 | 30 | print('\nupload') 31 | pp(n.estimate_upload(iterations), 'MB/s', lambda x: (x / 1024 / 1024)) 32 | 33 | print('\ndownload') 34 | pp(n.estimate_download(iterations), 'MB/s', lambda x: (x / 1024 / 1024)) 35 | 36 | conn.execute('DROP DATABASE performance') 37 | -------------------------------------------------------------------------------- /memsql/__init__.py: -------------------------------------------------------------------------------- 1 | """ MemSQL-python 2 | """ 3 | 4 | __version__ = "3.2.0" 5 | -------------------------------------------------------------------------------- /memsql/common/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/memsql/memsql-python/4334c6291cb2294c6e52763ac14628080e9ba601/memsql/common/__init__.py -------------------------------------------------------------------------------- /memsql/common/connection_pool.py: -------------------------------------------------------------------------------- 1 | from MySQLdb import _mysql 2 | import errno 3 | import multiprocessing 4 | import logging 5 | from memsql.common import database 6 | 7 | try: 8 | import queue 9 | except ImportError: 10 | import Queue as queue 11 | 12 | MySQLError = database.MySQLError 13 | QUEUE_SIZE = 128 14 | 15 | class HashableDict(dict): 16 | def __hash__(self): 17 | return hash(frozenset(self.items())) 18 | 19 | class PoolConnectionException(IOError): 20 | """ This exception consolidates all connection exceptions into a single type """ 21 | 22 | def __init__(self, errno, message, connection_key): 23 | IOError.__init__(self, errno, message) 24 | (self.host, self.port, self.user, self.password, self.db_name, self.options, self.pid) = connection_key 25 | 26 | def _get_message(self): 27 | return self.args[1] 28 | 29 | message = property(_get_message) 30 | 31 | class ConnectionPool(object): 32 | def __init__(self): 33 | self.logger = logging.getLogger('memsql.connection_pool') 34 | self._connections = {} 35 | self._fairies = {} 36 | self._current_version = 0 37 | 38 | def rolling_restart(self): 39 | """ Gradually close all existing connections, allowing currently-used connections to finish. 40 | 41 | This may be used after MemSQL session state has changed and pre-existing connections 42 | are no longer valid.
43 | """ 44 | self._current_version = self._current_version + 1 45 | 46 | def connect(self, host, port, user, password, database, options=None): 47 | current_proc = multiprocessing.current_process() 48 | key = (host, port, user, password, database, HashableDict(options) if options else None, current_proc.pid) 49 | 50 | if key not in self._connections: 51 | self._connections[key] = queue.Queue(maxsize=QUEUE_SIZE) 52 | 53 | fairy = _PoolConnectionFairy(key, self) 54 | fairy.connect(self._current_version) 55 | self._fairies[fairy] = 1 56 | return fairy 57 | 58 | def checkin(self, fairy, key, conn, expire_connection=False): 59 | if key not in self._connections: 60 | self._connections[key] = queue.Queue(maxsize=QUEUE_SIZE) 61 | 62 | if expire_connection: 63 | try: 64 | conn.close() 65 | except Exception: 66 | self.logger.error("Could not close connection after fairy expired") 67 | elif (conn._version == self._current_version): 68 | try: 69 | self._connections[key].put_nowait(conn) 70 | except queue.Full: 71 | conn.close() 72 | else: 73 | conn.close() 74 | 75 | if fairy in self._fairies: 76 | del(self._fairies[fairy]) 77 | 78 | def close(self): 79 | for fairy in list(self._fairies.keys()): 80 | fairy.close() 81 | 82 | for q in self._connections.values(): 83 | while True: 84 | try: 85 | conn = q.get_nowait() 86 | conn.close() 87 | except queue.Empty: 88 | break 89 | self._current_version = self._current_version + 1 90 | 91 | def size(self): 92 | """ Returns the number of connections cached by the pool. """ 93 | return sum(q.qsize() for q in self._connections.values()) + len(self._fairies) 94 | 95 | class _PoolConnectionFairy(object): 96 | def __init__(self, key, pool): 97 | self._key = key 98 | self._pool = pool 99 | self._expired = False 100 | self._conn = None 101 | 102 | def expire(self): 103 | self._expired = True 104 | 105 | def close(self): 106 | self._pool.checkin(self, self._key, self._conn, expire_connection=self._expired) 107 | 108 | def connection_info(self): 109 | return (self._key[0], self._key[1]) 110 | 111 | def __enter__(self): 112 | return self 113 | 114 | def __exit__(self, exc_type, exc_value, traceback): 115 | self.close() 116 | 117 | def __wrap_errors(self, fn, *args, **kwargs): 118 | def wrapped(*args, **kwargs): 119 | try: 120 | return fn(*args, **kwargs) 121 | except IOError as e: 122 | if e.errno in [errno.ECONNRESET, errno.ECONNREFUSED, errno.ETIMEDOUT]: 123 | # socket connection issues 124 | self.__handle_connection_failure(e) 125 | else: 126 | raise 127 | except _mysql.OperationalError as e: 128 | # _mysql specific database connect issues, internal state issues 129 | if self._conn is not None: 130 | self.__potential_connection_failure(e) 131 | else: 132 | self.__handle_connection_failure(e) 133 | return wrapped 134 | 135 | def __potential_connection_failure(self, e): 136 | """ OperationalError's are emitted by the _mysql library for 137 | almost every error code emitted by MySQL. Because of this we 138 | verify that the error is actually a connection error before 139 | terminating the connection and firing off a PoolConnectionException 140 | """ 141 | try: 142 | self._conn.query('SELECT 1') 143 | except (IOError, _mysql.OperationalError): 144 | # ok, it's actually an issue. 
145 | self.__handle_connection_failure(e) 146 | else: 147 | # seems ok, probably programmer error 148 | raise _mysql.DatabaseError(*e.args) 149 | 150 | def __handle_connection_failure(self, e): 151 | # expire the connection so we don't return it to the pool accidentally 152 | self.expire() 153 | 154 | # build and raise the new consolidated exception 155 | message = None 156 | if isinstance(e, _mysql.OperationalError) or (hasattr(e, 'args') and len(e.args) >= 2): 157 | err_num = e.args[0] 158 | message = e.args[1] 159 | elif hasattr(e, 'errno'): 160 | err_num = e.errno 161 | else: 162 | err_num = errno.ECONNABORTED 163 | 164 | raise PoolConnectionException(err_num, message, self._key) 165 | 166 | ################## 167 | # Wrap DB Api to deal with connection issues and so on in an intelligent way 168 | 169 | def connect(self, current_version): 170 | self._conn = None 171 | try: 172 | conn = self._pool._connections[self._key].get_nowait() 173 | if (conn._version == current_version) and self.__wrap_errors(conn.connected)(): 174 | self._conn = conn 175 | else: 176 | conn.close() 177 | except (queue.Empty, PoolConnectionException): 178 | pass 179 | 180 | if self._conn is None: 181 | (host, port, user, password, db_name, options, pid) = self._key 182 | _connect = self.__wrap_errors(database.connect) 183 | self._conn = _connect( 184 | host=host, port=port, user=user, password=password, 185 | database=db_name, _version=current_version, options=options) 186 | 187 | # catchall 188 | def __getattr__(self, key): 189 | method = getattr(self._conn, key, None) 190 | if method is None: 191 | raise AttributeError('Attribute `%s` does not exist' % key) 192 | else: 193 | return self.__wrap_errors(method) 194 | -------------------------------------------------------------------------------- /memsql/common/conversions.py: -------------------------------------------------------------------------------- 1 | from MySQLdb.constants import FIELD_TYPE, FLAG 2 | from MySQLdb.converters import conversions, Bool2Str 3 | from MySQLdb import times, _mysql 4 | import datetime 5 | import inspect 6 | 7 | CONVERSIONS = conversions 8 | 9 | def _bytes_to_utf8(b): 10 | return b.decode('utf-8') 11 | 12 | CONVERSIONS[FIELD_TYPE.STRING] = ((FLAG.BINARY, bytes), (None, _bytes_to_utf8)) 13 | CONVERSIONS[FIELD_TYPE.VAR_STRING] = ((FLAG.BINARY, bytes), (None, _bytes_to_utf8)) 14 | CONVERSIONS[FIELD_TYPE.VARCHAR] = ((FLAG.BINARY, bytes), (None, _bytes_to_utf8)) 15 | CONVERSIONS[FIELD_TYPE.BLOB] = ((FLAG.BINARY, bytes), (None, _bytes_to_utf8)) 16 | 17 | def _escape_bytes(b, c): 18 | return _mysql.string_literal(b, c).decode('utf-8') 19 | 20 | CONVERSIONS[bytes] = _escape_bytes 21 | 22 | def _escape_string(s, d): 23 | return _mysql.string_literal(s.encode('utf-8')).decode('utf-8') 24 | 25 | CONVERSIONS[str] = _escape_string 26 | 27 | def _escape_datetime(dt, c): 28 | return times.DateTime2literal(dt, c).decode('utf-8') 29 | def _escape_timedelta(dt, c): 30 | return times.DateTimeDelta2literal(dt, c).decode('utf-8') 31 | 32 | CONVERSIONS[datetime.datetime] = _escape_datetime 33 | CONVERSIONS[datetime.timedelta] = _escape_timedelta 34 | 35 | def _escape_bool(b, d): 36 | return Bool2Str(b, d).decode('utf-8') 37 | CONVERSIONS[bool] = _escape_bool 38 | -------------------------------------------------------------------------------- /memsql/common/database.py: -------------------------------------------------------------------------------- 1 | """A lightweight wrapper around _mysql.""" 2 | 3 | from MySQLdb import _mysql 4 | import MySQLdb 5 | 
import time 6 | import operator 7 | 8 | try: 9 | from _thread import get_ident as _get_ident 10 | except ImportError: 11 | from thread import get_ident as _get_ident 12 | 13 | from memsql.common.conversions import CONVERSIONS 14 | 15 | MySQLError = _mysql.MySQLError 16 | OperationalError = _mysql.OperationalError 17 | DatabaseError = _mysql.DatabaseError 18 | 19 | def connect(*args, **kwargs): 20 | return Connection(*args, **kwargs) 21 | 22 | class Connection(object): 23 | """A lightweight wrapper around _mysql DB-API connections. 24 | 25 | The main value we provide is wrapping rows in a dict/object so that 26 | columns can be accessed by name. Typical usage:: 27 | 28 | db = database.Connection("localhost", database="mydatabase") 29 | for article in db.query("SELECT * FROM articles"): 30 | print(article.title) 31 | 32 | Cursors are hidden by the implementation, but other than that, the methods 33 | are very similar to the DB-API. 34 | 35 | We explicitly set the timezone to UTC and the character encoding to 36 | UTF-8 on all connections to avoid time zone and encoding errors. 37 | """ 38 | 39 | def __init__(self, host, port=3306, database="information_schema", user=None, password=None, 40 | max_idle_time=7 * 3600, _version=0, options=None): 41 | self.max_idle_time = max_idle_time 42 | 43 | args = { 44 | "db": database, 45 | "conv": CONVERSIONS 46 | } 47 | 48 | if user is not None: 49 | args["user"] = user 50 | if password is not None: 51 | args["passwd"] = password 52 | 53 | args["host"] = host 54 | args["port"] = int(port) 55 | 56 | if options is not None: 57 | assert isinstance(options, dict), "Options to database.Connection must be a dictionary of { str: value } pairs." 58 | args.update(options) 59 | 60 | # Fix for parameter name changes in mysqlclient v2.1.0 61 | if MySQLdb.version_info[:2] >= (2, 1): 62 | if "db" in args: 63 | args["database"] = args.pop("db") 64 | if "passwd" in args: 65 | args["password"] = args.pop("passwd") 66 | 67 | self._db = None 68 | self._db_args = args 69 | 70 | self._last_use_time = time.time() 71 | self.reconnect() 72 | self._db.set_character_set("utf8") 73 | 74 | self._version = _version 75 | 76 | def __del__(self): 77 | self.close() 78 | 79 | def __enter__(self): 80 | return self 81 | 82 | def __exit__(self, type, value, traceback): 83 | self.close() 84 | 85 | def close(self): 86 | """Closes this database connection.""" 87 | if getattr(self, "_db", None) is not None: 88 | self._db.close() 89 | self._db = None 90 | 91 | def connected(self): 92 | if self._db is not None: 93 | try: 94 | self.ping() 95 | return True 96 | except _mysql.InterfaceError: 97 | return False 98 | return False 99 | 100 | def reconnect(self): 101 | """Closes the existing database connection and re-opens it.""" 102 | conn = _mysql.connect(**self._db_args) 103 | if conn is not None: 104 | self.close() 105 | self._db = conn 106 | 107 | def select_db(self, database): 108 | self._db.select_db(database) 109 | 110 | # Fix for parameter name changes in mysqlclient v2.1.0 111 | if MySQLdb.version_info[:2] >= (2, 1): 112 | self._db_args['database'] = database 113 | else: 114 | self._db_args['db'] = database 115 | 116 | def ping(self): 117 | """ Ping the server """ 118 | return self._db.ping() 119 | 120 | def thread_id(self): 121 | """ Retrieve the thread id for the current connection """ 122 | return self._db.thread_id() 123 | 124 | def debug_query(self, query, *parameters, **kwparameters): 125 | return self._query(query, parameters, kwparameters, debug=True) 126 | 127 | def query(self, query,
*parameters, **kwparameters): 128 | """ 129 | Query the connection and return the rows (or affected rows if not a 130 | select query). MySQL errors will be propagated as exceptions. 131 | """ 132 | return self._query(query, parameters, kwparameters) 133 | 134 | def get(self, query, *parameters, **kwparameters): 135 | """Returns the first row returned for the given query.""" 136 | rows = self._query(query, parameters, kwparameters) 137 | if not rows: 138 | return None 139 | elif not isinstance(rows, list): 140 | raise MySQLError("Query is not a select query") 141 | elif len(rows) > 1: 142 | raise MySQLError("Multiple rows returned for Database.get() query") 143 | else: 144 | return rows[0] 145 | 146 | # rowcount is a more reasonable default return value than lastrowid, 147 | # but for historical compatibility execute() must return lastrowid. 148 | def execute(self, query, *parameters, **kwparameters): 149 | """Executes the given query, returning the lastrowid from the query.""" 150 | return self.execute_lastrowid(query, *parameters, **kwparameters) 151 | 152 | def execute_lastrowid(self, query, *parameters, **kwparameters): 153 | """Executes the given query, returning the lastrowid from the query.""" 154 | self._execute(query, parameters, kwparameters) 155 | self._result = self._db.store_result() 156 | return self._db.insert_id() 157 | 158 | def _query(self, query, parameters, kwparameters, debug=False): 159 | self._execute(query, parameters, kwparameters, debug) 160 | 161 | self._result = self._db.store_result() 162 | if self._result is None: 163 | return self._rowcount 164 | 165 | fields = [ f[0] for f in self._result.describe() ] 166 | rows = self._result.fetch_row(0) 167 | return SelectResult(fields, rows) 168 | 169 | def _execute(self, query, parameters, kwparameters, debug=False): 170 | if parameters and kwparameters: 171 | raise ValueError('database.py querying functions can receive *args or **kwargs, but not both') 172 | 173 | query = escape_query(query, parameters or kwparameters) 174 | if debug: 175 | print(query) 176 | 177 | self._ensure_connected() 178 | self._db.query(query) 179 | self._rowcount = self._db.affected_rows() 180 | 181 | def _ensure_connected(self): 182 | # MySQL by default closes client connections that are idle for 183 | # 8 hours, but the client library does not report this fact until 184 | # you try to perform a query and it fails. Protect against this 185 | # case by preemptively closing and reopening the connection 186 | # if it has been idle for too long (7 hours by default).
187 | if (self._db is None or (time.time() - self._last_use_time > self.max_idle_time)): 188 | self.reconnect() 189 | self._last_use_time = time.time() 190 | 191 | 192 | class Row(object): 193 | """A fast, ordered, partially-immutable dictlike object (or objectlike dict).""" 194 | 195 | def __init__(self, fields, values): 196 | self._fields = fields 197 | self._values = values 198 | 199 | def __getattr__(self, name): 200 | try: 201 | return self._values[self._fields.index(name)] 202 | except (ValueError, IndexError): 203 | raise AttributeError(name) 204 | 205 | def __getitem__(self, name): 206 | try: 207 | return self._values[self._fields.index(name)] 208 | except (ValueError, IndexError): 209 | raise KeyError(name) 210 | 211 | def __setitem__(self, name, value): 212 | try: 213 | self._values[self._fields.index(name)] = value 214 | except (ValueError, IndexError): 215 | self._fields += (name,) 216 | self._values += (value,) 217 | 218 | def __contains__(self, name): 219 | return name in self._fields 220 | 221 | has_key = __contains__ 222 | 223 | def __sizeof__(self, name): 224 | return len(self._fields) 225 | 226 | def __iter__(self): 227 | return self._fields.__iter__() 228 | 229 | def __len__(self): 230 | return self._fields.__len__() 231 | 232 | def get(self, name, default=None): 233 | try: 234 | return self.__getitem__(name) 235 | except KeyError: 236 | return default 237 | 238 | def keys(self): 239 | for field in iter(self._fields): 240 | yield field 241 | 242 | def values(self): 243 | for value in iter(self._values): 244 | yield value 245 | 246 | def items(self): 247 | for item in zip(self._fields, self._values): 248 | yield item 249 | 250 | def __eq__(self, other): 251 | if isinstance(other, Row): 252 | return (dict(self.items()) == other) and all(map(operator.eq, self, other)) 253 | else: 254 | return dict(self.items()) == other 255 | 256 | def __ne__(self, other): 257 | return not self == other 258 | 259 | def __repr__(self, _repr_running={}): 260 | call_key = id(self), _get_ident() 261 | if call_key in _repr_running: 262 | return '...' 263 | _repr_running[call_key] = 1 264 | try: 265 | if not self: 266 | return '%s()' % (self.__class__.__name__,) 267 | return '%s(%r)' % (self.__class__.__name__, dict(self.items())) 268 | finally: 269 | del _repr_running[call_key] 270 | 271 | # for simplejson.dumps() 272 | def _asdict(self): 273 | return dict(self) 274 | 275 | def nope(self, *args, **kwargs): 276 | raise NotImplementedError('This object is partially immutable. 
To modify it, call "foo = dict(foo)" first.') 277 | 278 | update = nope 279 | pop = nope 280 | setdefault = nope 281 | fromkeys = nope 282 | clear = nope 283 | __delitem__ = nope 284 | __reversed__ = nope 285 | 286 | class SelectResult(list): 287 | def __init__(self, fieldnames, rows): 288 | self.fieldnames = tuple(fieldnames) 289 | self.rows = rows 290 | 291 | data = [Row(self.fieldnames, row) for row in self.rows] 292 | list.__init__(self, data) 293 | 294 | def width(self): 295 | return len(self.fieldnames) 296 | 297 | def __getitem__(self, i): 298 | if isinstance(i, slice): 299 | return SelectResult(self.fieldnames, self.rows[i]) 300 | return list.__getitem__(self, i) 301 | 302 | def escape_query(query, parameters): 303 | if parameters: 304 | if isinstance(parameters, (list, tuple)): 305 | query = query % tuple(map(_escape, parameters)) 306 | elif isinstance(parameters, dict): 307 | params = {} 308 | for key, val in parameters.items(): 309 | params[key] = _escape(val) 310 | 311 | query = query % params 312 | else: 313 | assert False, 'not sure what to do with parameters of type %s' % type(parameters) 314 | 315 | return query 316 | 317 | def _escape(param): 318 | def _bytes_to_utf8(b): 319 | return b.decode("utf-8") if isinstance(b, bytes) else b 320 | 321 | if isinstance(param, (list, tuple)): 322 | return ','.join(_bytes_to_utf8(_mysql.escape(p, CONVERSIONS)) for p in param) 323 | else: 324 | return _bytes_to_utf8(_mysql.escape(param, CONVERSIONS)) 325 | -------------------------------------------------------------------------------- /memsql/common/errorcodes.py: -------------------------------------------------------------------------------- 1 | """ This file lists every error code emitted by MemSQL along with the 2 | corresponding error number. 
3 | """ 4 | 5 | ER_ERROR_FIRST = 1000 6 | ER_HASHCHK = 1000 7 | ER_NISAMCHK = 1001 8 | ER_NO = 1002 9 | ER_YES = 1003 10 | ER_CANT_CREATE_FILE = 1004 11 | ER_CANT_CREATE_TABLE = 1005 12 | ER_CANT_CREATE_DB = 1006 13 | ER_DB_CREATE_EXISTS = 1007 14 | ER_DB_DROP_EXISTS = 1008 15 | ER_DB_DROP_DELETE = 1009 16 | ER_DB_DROP_RMDIR = 1010 17 | ER_CANT_DELETE_FILE = 1011 18 | ER_CANT_FIND_SYSTEM_REC = 1012 19 | ER_CANT_GET_STAT = 1013 20 | ER_CANT_GET_WD = 1014 21 | ER_CANT_LOCK = 1015 22 | ER_CANT_OPEN_FILE = 1016 23 | ER_FILE_NOT_FOUND = 1017 24 | ER_CANT_READ_DIR = 1018 25 | ER_CANT_SET_WD = 1019 26 | ER_CHECKREAD = 1020 27 | ER_DISK_FULL = 1021 28 | ER_DUP_KEY = 1022 29 | ER_ERROR_ON_CLOSE = 1023 30 | ER_ERROR_ON_READ = 1024 31 | ER_ERROR_ON_RENAME = 1025 32 | ER_ERROR_ON_WRITE = 1026 33 | ER_FILE_USED = 1027 34 | ER_FILSORT_ABORT = 1028 35 | ER_FORM_NOT_FOUND = 1029 36 | ER_GET_ERRNO = 1030 37 | ER_ILLEGAL_HA = 1031 38 | ER_KEY_NOT_FOUND = 1032 39 | ER_NOT_FORM_FILE = 1033 40 | ER_NOT_KEYFILE = 1034 41 | ER_OLD_KEYFILE = 1035 42 | ER_OPEN_AS_READONLY = 1036 43 | ER_OUTOFMEMORY = 1037 44 | ER_OUT_OF_SORTMEMORY = 1038 45 | ER_UNEXPECTED_EOF = 1039 46 | ER_CON_COUNT_ERROR = 1040 47 | ER_OUT_OF_RESOURCES = 1041 48 | ER_BAD_HOST_ERROR = 1042 49 | ER_HANDSHAKE_ERROR = 1043 50 | ER_DBACCESS_DENIED_ERROR = 1044 51 | ER_ACCESS_DENIED_ERROR = 1045 52 | ER_NO_DB_ERROR = 1046 53 | ER_UNKNOWN_COM_ERROR = 1047 54 | ER_BAD_NULL_ERROR = 1048 55 | ER_BAD_DB_ERROR = 1049 56 | ER_TABLE_EXISTS_ERROR = 1050 57 | ER_BAD_TABLE_ERROR = 1051 58 | ER_NON_UNIQ_ERROR = 1052 59 | ER_SERVER_SHUTDOWN = 1053 60 | ER_BAD_FIELD_ERROR = 1054 61 | ER_WRONG_FIELD_WITH_GROUP = 1055 62 | ER_WRONG_GROUP_FIELD = 1056 63 | ER_WRONG_SUM_SELECT = 1057 64 | ER_WRONG_VALUE_COUNT = 1058 65 | ER_TOO_LONG_IDENT = 1059 66 | ER_DUP_FIELDNAME = 1060 67 | ER_DUP_KEYNAME = 1061 68 | ER_DUP_ENTRY = 1062 69 | ER_WRONG_FIELD_SPEC = 1063 70 | ER_PARSE_ERROR = 1064 71 | ER_EMPTY_QUERY = 1065 72 | ER_NONUNIQ_TABLE = 1066 73 | ER_INVALID_DEFAULT = 1067 74 | ER_MULTIPLE_PRI_KEY = 1068 75 | ER_TOO_MANY_KEYS = 1069 76 | ER_TOO_MANY_KEY_PARTS = 1070 77 | ER_TOO_LONG_KEY = 1071 78 | ER_KEY_COLUMN_DOES_NOT_EXITS = 1072 79 | ER_BLOB_USED_AS_KEY = 1073 80 | ER_TOO_BIG_FIELDLENGTH = 1074 81 | ER_WRONG_AUTO_KEY = 1075 82 | ER_READY = 1076 83 | ER_NORMAL_SHUTDOWN = 1077 84 | ER_GOT_SIGNAL = 1078 85 | ER_SHUTDOWN_COMPLETE = 1079 86 | ER_FORCING_CLOSE = 1080 87 | ER_IPSOCK_ERROR = 1081 88 | ER_NO_SUCH_INDEX = 1082 89 | ER_WRONG_FIELD_TERMINATORS = 1083 90 | ER_BLOBS_AND_NO_TERMINATED = 1084 91 | ER_TEXTFILE_NOT_READABLE = 1085 92 | ER_FILE_EXISTS_ERROR = 1086 93 | ER_LOAD_INFO = 1087 94 | ER_ALTER_INFO = 1088 95 | ER_WRONG_SUB_KEY = 1089 96 | ER_CANT_REMOVE_ALL_FIELDS = 1090 97 | ER_CANT_DROP_FIELD_OR_KEY = 1091 98 | ER_INSERT_INFO = 1092 99 | ER_UPDATE_TABLE_USED = 1093 100 | ER_NO_SUCH_THREAD = 1094 101 | ER_KILL_DENIED_ERROR = 1095 102 | ER_NO_TABLES_USED = 1096 103 | ER_TOO_BIG_SET = 1097 104 | ER_NO_UNIQUE_LOGFILE = 1098 105 | ER_TABLE_NOT_LOCKED_FOR_WRITE = 1099 106 | ER_TABLE_NOT_LOCKED = 1100 107 | ER_BLOB_CANT_HAVE_DEFAULT = 1101 108 | ER_WRONG_DB_NAME = 1102 109 | ER_WRONG_TABLE_NAME = 1103 110 | ER_TOO_BIG_SELECT = 1104 111 | ER_UNKNOWN_ERROR = 1105 112 | ER_UNKNOWN_PROCEDURE = 1106 113 | ER_WRONG_PARAMCOUNT_TO_PROCEDURE = 1107 114 | ER_WRONG_PARAMETERS_TO_PROCEDURE = 1108 115 | ER_UNKNOWN_TABLE = 1109 116 | ER_FIELD_SPECIFIED_TWICE = 1110 117 | ER_INVALID_GROUP_FUNC_USE = 1111 118 | ER_UNSUPPORTED_EXTENSION = 1112 119 | ER_TABLE_MUST_HAVE_COLUMNS = 1113 
120 | ER_RECORD_FILE_FULL = 1114 121 | ER_UNKNOWN_CHARACTER_SET = 1115 122 | ER_TOO_MANY_TABLES = 1116 123 | ER_TOO_MANY_FIELDS = 1117 124 | ER_TOO_BIG_ROWSIZE = 1118 125 | ER_STACK_OVERRUN = 1119 126 | ER_WRONG_OUTER_JOIN = 1120 127 | ER_NULL_COLUMN_IN_INDEX = 1121 128 | ER_CANT_FIND_UDF = 1122 129 | ER_CANT_INITIALIZE_UDF = 1123 130 | ER_UDF_NO_PATHS = 1124 131 | ER_UDF_EXISTS = 1125 132 | ER_CANT_OPEN_LIBRARY = 1126 133 | ER_CANT_FIND_DL_ENTRY = 1127 134 | ER_FUNCTION_NOT_DEFINED = 1128 135 | ER_HOST_IS_BLOCKED = 1129 136 | ER_HOST_NOT_PRIVILEGED = 1130 137 | ER_PASSWORD_ANONYMOUS_USER = 1131 138 | ER_PASSWORD_NOT_ALLOWED = 1132 139 | ER_PASSWORD_NO_MATCH = 1133 140 | ER_UPDATE_INFO = 1134 141 | ER_CANT_CREATE_THREAD = 1135 142 | ER_WRONG_VALUE_COUNT_ON_ROW = 1136 143 | ER_CANT_REOPEN_TABLE = 1137 144 | ER_INVALID_USE_OF_NULL = 1138 145 | ER_REGEXP_ERROR = 1139 146 | ER_MIX_OF_GROUP_FUNC_AND_FIELDS = 1140 147 | ER_NONEXISTING_GRANT = 1141 148 | ER_TABLEACCESS_DENIED_ERROR = 1142 149 | ER_COLUMNACCESS_DENIED_ERROR = 1143 150 | ER_ILLEGAL_GRANT_FOR_TABLE = 1144 151 | ER_GRANT_WRONG_HOST_OR_USER = 1145 152 | ER_NO_SUCH_TABLE = 1146 153 | ER_NONEXISTING_TABLE_GRANT = 1147 154 | ER_NOT_ALLOWED_COMMAND = 1148 155 | ER_SYNTAX_ERROR = 1149 156 | ER_DELAYED_CANT_CHANGE_LOCK = 1150 157 | ER_TOO_MANY_DELAYED_THREADS = 1151 158 | ER_ABORTING_CONNECTION = 1152 159 | ER_NET_PACKET_TOO_LARGE = 1153 160 | ER_NET_READ_ERROR_FROM_PIPE = 1154 161 | ER_NET_FCNTL_ERROR = 1155 162 | ER_NET_PACKETS_OUT_OF_ORDER = 1156 163 | ER_NET_UNCOMPRESS_ERROR = 1157 164 | ER_NET_READ_ERROR = 1158 165 | ER_NET_READ_INTERRUPTED = 1159 166 | ER_NET_ERROR_ON_WRITE = 1160 167 | ER_NET_WRITE_INTERRUPTED = 1161 168 | ER_TOO_LONG_STRING = 1162 169 | ER_TABLE_CANT_HANDLE_BLOB = 1163 170 | ER_TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164 171 | ER_DELAYED_INSERT_TABLE_LOCKED = 1165 172 | ER_WRONG_COLUMN_NAME = 1166 173 | ER_WRONG_KEY_COLUMN = 1167 174 | ER_WRONG_MRG_TABLE = 1168 175 | ER_DUP_UNIQUE = 1169 176 | ER_BLOB_KEY_WITHOUT_LENGTH = 1170 177 | ER_PRIMARY_CANT_HAVE_NULL = 1171 178 | ER_TOO_MANY_ROWS = 1172 179 | ER_REQUIRES_PRIMARY_KEY = 1173 180 | ER_NO_RAID_COMPILED = 1174 181 | ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175 182 | ER_KEY_DOES_NOT_EXITS = 1176 183 | ER_CHECK_NO_SUCH_TABLE = 1177 184 | ER_CHECK_NOT_IMPLEMENTED = 1178 185 | ER_CANT_DO_THIS_DURING_AN_TRANSACTION = 1179 186 | ER_ERROR_DURING_COMMIT = 1180 187 | ER_ERROR_DURING_ROLLBACK = 1181 188 | ER_ERROR_DURING_FLUSH_LOGS = 1182 189 | ER_ERROR_DURING_CHECKPOINT = 1183 190 | ER_NEW_ABORTING_CONNECTION = 1184 191 | ER_DUMP_NOT_IMPLEMENTED = 1185 192 | ER_FLUSH_MASTER_BINLOG_CLOSED = 1186 193 | ER_INDEX_REBUILD = 1187 194 | ER_MASTER = 1188 195 | ER_MASTER_NET_READ = 1189 196 | ER_MASTER_NET_WRITE = 1190 197 | ER_FT_MATCHING_KEY_NOT_FOUND = 1191 198 | ER_LOCK_OR_ACTIVE_TRANSACTION = 1192 199 | ER_UNKNOWN_SYSTEM_VARIABLE = 1193 200 | ER_CRASHED_ON_USAGE = 1194 201 | ER_CRASHED_ON_REPAIR = 1195 202 | ER_WARNING_NOT_COMPLETE_ROLLBACK = 1196 203 | ER_TRANS_CACHE_FULL = 1197 204 | ER_SLAVE_MUST_STOP = 1198 205 | ER_SLAVE_NOT_RUNNING = 1199 206 | ER_BAD_SLAVE = 1200 207 | ER_MASTER_INFO = 1201 208 | ER_SLAVE_THREAD = 1202 209 | ER_TOO_MANY_USER_CONNECTIONS = 1203 210 | ER_SET_CONSTANTS_ONLY = 1204 211 | ER_LOCK_WAIT_TIMEOUT = 1205 212 | ER_LOCK_TABLE_FULL = 1206 213 | ER_READ_ONLY_TRANSACTION = 1207 214 | ER_DROP_DB_WITH_READ_LOCK = 1208 215 | ER_CREATE_DB_WITH_READ_LOCK = 1209 216 | ER_WRONG_ARGUMENTS = 1210 217 | ER_NO_PERMISSION_TO_CREATE_USER = 1211 218 | 
ER_UNION_TABLES_IN_DIFFERENT_DIR = 1212 219 | ER_LOCK_DEADLOCK = 1213 220 | ER_TABLE_CANT_HANDLE_FT = 1214 221 | ER_CANNOT_ADD_FOREIGN = 1215 222 | ER_NO_REFERENCED_ROW = 1216 223 | ER_ROW_IS_REFERENCED = 1217 224 | ER_CONNECT_TO_MASTER = 1218 225 | ER_QUERY_ON_MASTER = 1219 226 | ER_ERROR_WHEN_EXECUTING_COMMAND = 1220 227 | ER_WRONG_USAGE = 1221 228 | ER_WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222 229 | ER_CANT_UPDATE_WITH_READLOCK = 1223 230 | ER_MIXING_NOT_ALLOWED = 1224 231 | ER_DUP_ARGUMENT = 1225 232 | ER_USER_LIMIT_REACHED = 1226 233 | ER_SPECIFIC_ACCESS_DENIED_ERROR = 1227 234 | ER_LOCAL_VARIABLE = 1228 235 | ER_GLOBAL_VARIABLE = 1229 236 | ER_NO_DEFAULT = 1230 237 | ER_WRONG_VALUE_FOR_VAR = 1231 238 | ER_WRONG_TYPE_FOR_VAR = 1232 239 | ER_VAR_CANT_BE_READ = 1233 240 | ER_CANT_USE_OPTION_HERE = 1234 241 | ER_NOT_SUPPORTED_YET = 1235 242 | ER_MASTER_FATAL_ERROR_READING_BINLOG = 1236 243 | ER_SLAVE_IGNORED_TABLE = 1237 244 | ER_INCORRECT_GLOBAL_LOCAL_VAR = 1238 245 | ER_WRONG_FK_DEF = 1239 246 | ER_KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240 247 | ER_OPERAND_COLUMNS = 1241 248 | ER_SUBQUERY_NO_1_ROW = 1242 249 | ER_UNKNOWN_STMT_HANDLER = 1243 250 | ER_CORRUPT_HELP_DB = 1244 251 | ER_CYCLIC_REFERENCE = 1245 252 | ER_AUTO_CONVERT = 1246 253 | ER_ILLEGAL_REFERENCE = 1247 254 | ER_DERIVED_MUST_HAVE_ALIAS = 1248 255 | ER_SELECT_REDUCED = 1249 256 | ER_TABLENAME_NOT_ALLOWED_HERE = 1250 257 | ER_NOT_SUPPORTED_AUTH_MODE = 1251 258 | ER_SPATIAL_CANT_HAVE_NULL = 1252 259 | ER_COLLATION_CHARSET_MISMATCH = 1253 260 | ER_SLAVE_WAS_RUNNING = 1254 261 | ER_SLAVE_WAS_NOT_RUNNING = 1255 262 | ER_TOO_BIG_FOR_UNCOMPRESS = 1256 263 | ER_ZLIB_Z_MEM_ERROR = 1257 264 | ER_ZLIB_Z_BUF_ERROR = 1258 265 | ER_ZLIB_Z_DATA_ERROR = 1259 266 | ER_CUT_VALUE_GROUP_CONCAT = 1260 267 | ER_WARN_TOO_FEW_RECORDS = 1261 268 | ER_WARN_TOO_MANY_RECORDS = 1262 269 | ER_WARN_NULL_TO_NOTNULL = 1263 270 | ER_WARN_DATA_OUT_OF_RANGE = 1264 271 | WARN_DATA_TRUNCATED = 1265 272 | ER_WARN_USING_OTHER_HANDLER = 1266 273 | ER_CANT_AGGREGATE_2COLLATIONS = 1267 274 | ER_DROP_USER = 1268 275 | ER_REVOKE_GRANTS = 1269 276 | ER_CANT_AGGREGATE_3COLLATIONS = 1270 277 | ER_CANT_AGGREGATE_NCOLLATIONS = 1271 278 | ER_VARIABLE_IS_NOT_STRUCT = 1272 279 | ER_UNKNOWN_COLLATION = 1273 280 | ER_SLAVE_IGNORED_SSL_PARAMS = 1274 281 | ER_SERVER_IS_IN_SECURE_AUTH_MODE = 1275 282 | ER_WARN_FIELD_RESOLVED = 1276 283 | ER_BAD_SLAVE_UNTIL_COND = 1277 284 | ER_MISSING_SKIP_SLAVE = 1278 285 | ER_UNTIL_COND_IGNORED = 1279 286 | ER_WRONG_NAME_FOR_INDEX = 1280 287 | ER_WRONG_NAME_FOR_CATALOG = 1281 288 | ER_WARN_QC_RESIZE = 1282 289 | ER_BAD_FT_COLUMN = 1283 290 | ER_UNKNOWN_KEY_CACHE = 1284 291 | ER_WARN_HOSTNAME_WONT_WORK = 1285 292 | ER_UNKNOWN_STORAGE_ENGINE = 1286 293 | ER_WARN_DEPRECATED_SYNTAX = 1287 294 | ER_NON_UPDATABLE_TABLE = 1288 295 | ER_FEATURE_DISABLED = 1289 296 | ER_OPTION_PREVENTS_STATEMENT = 1290 297 | ER_DUPLICATED_VALUE_IN_TYPE = 1291 298 | ER_TRUNCATED_WRONG_VALUE = 1292 299 | ER_TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293 300 | ER_INVALID_ON_UPDATE = 1294 301 | ER_UNSUPPORTED_PS = 1295 302 | ER_GET_ERRMSG = 1296 303 | ER_GET_TEMPORARY_ERRMSG = 1297 304 | ER_UNKNOWN_TIME_ZONE = 1298 305 | ER_WARN_INVALID_TIMESTAMP = 1299 306 | ER_INVALID_CHARACTER_STRING = 1300 307 | ER_WARN_ALLOWED_PACKET_OVERFLOWED = 1301 308 | ER_CONFLICTING_DECLARATIONS = 1302 309 | ER_SP_NO_RECURSIVE_CREATE = 1303 310 | ER_SP_ALREADY_EXISTS = 1304 311 | ER_SP_DOES_NOT_EXIST = 1305 312 | ER_SP_DROP_FAILED = 1306 313 | ER_SP_STORE_FAILED = 1307 314 | ER_SP_LILABEL_MISMATCH = 1308 315 | 
ER_SP_LABEL_REDEFINE = 1309 316 | ER_SP_LABEL_MISMATCH = 1310 317 | ER_SP_UNINIT_VAR = 1311 318 | ER_SP_BADSELECT = 1312 319 | ER_SP_BADRETURN = 1313 320 | ER_SP_BADSTATEMENT = 1314 321 | ER_UPDATE_LOG_DEPRECATED_IGNORED = 1315 322 | ER_UPDATE_LOG_DEPRECATED_TRANSLATED = 1316 323 | ER_QUERY_INTERRUPTED = 1317 324 | ER_SP_WRONG_NO_OF_ARGS = 1318 325 | ER_SP_COND_MISMATCH = 1319 326 | ER_SP_NORETURN = 1320 327 | ER_SP_NORETURNEND = 1321 328 | ER_SP_BAD_CURSOR_QUERY = 1322 329 | ER_SP_BAD_CURSOR_SELECT = 1323 330 | ER_SP_CURSOR_MISMATCH = 1324 331 | ER_SP_CURSOR_ALREADY_OPEN = 1325 332 | ER_SP_CURSOR_NOT_OPEN = 1326 333 | ER_SP_UNDECLARED_VAR = 1327 334 | ER_SP_WRONG_NO_OF_FETCH_ARGS = 1328 335 | ER_SP_FETCH_NO_DATA = 1329 336 | ER_SP_DUP_PARAM = 1330 337 | ER_SP_DUP_VAR = 1331 338 | ER_SP_DUP_COND = 1332 339 | ER_SP_DUP_CURS = 1333 340 | ER_SP_CANT_ALTER = 1334 341 | ER_SP_SUBSELECT_NYI = 1335 342 | ER_STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336 343 | ER_SP_VARCOND_AFTER_CURSHNDLR = 1337 344 | ER_SP_CURSOR_AFTER_HANDLER = 1338 345 | ER_SP_CASE_NOT_FOUND = 1339 346 | ER_FPARSER_TOO_BIG_FILE = 1340 347 | ER_FPARSER_BAD_HEADER = 1341 348 | ER_FPARSER_EOF_IN_COMMENT = 1342 349 | ER_FPARSER_ERROR_IN_PARAMETER = 1343 350 | ER_FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344 351 | ER_VIEW_NO_EXPLAIN = 1345 352 | ER_FRM_UNKNOWN_TYPE = 1346 353 | ER_WRONG_OBJECT = 1347 354 | ER_NONUPDATEABLE_COLUMN = 1348 355 | ER_VIEW_SELECT_DERIVED = 1349 356 | ER_VIEW_SELECT_CLAUSE = 1350 357 | ER_VIEW_SELECT_VARIABLE = 1351 358 | ER_VIEW_SELECT_TMPTABLE = 1352 359 | ER_VIEW_WRONG_LIST = 1353 360 | ER_WARN_VIEW_MERGE = 1354 361 | ER_WARN_VIEW_WITHOUT_KEY = 1355 362 | ER_VIEW_INVALID = 1356 363 | ER_SP_NO_DROP_SP = 1357 364 | ER_SP_GOTO_IN_HNDLR = 1358 365 | ER_TRG_ALREADY_EXISTS = 1359 366 | ER_TRG_DOES_NOT_EXIST = 1360 367 | ER_TRG_ON_VIEW_OR_TEMP_TABLE = 1361 368 | ER_TRG_CANT_CHANGE_ROW = 1362 369 | ER_TRG_NO_SUCH_ROW_IN_TRG = 1363 370 | ER_NO_DEFAULT_FOR_FIELD = 1364 371 | ER_DIVISION_BY_ZERO = 1365 372 | ER_TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366 373 | ER_ILLEGAL_VALUE_FOR_TYPE = 1367 374 | ER_VIEW_NONUPD_CHECK = 1368 375 | ER_VIEW_CHECK_FAILED = 1369 376 | ER_PROCACCESS_DENIED_ERROR = 1370 377 | ER_RELAY_LOG_FAIL = 1371 378 | ER_PASSWD_LENGTH = 1372 379 | ER_UNKNOWN_TARGET_BINLOG = 1373 380 | ER_IO_ERR_LOG_INDEX_READ = 1374 381 | ER_BINLOG_PURGE_PROHIBITED = 1375 382 | ER_FSEEK_FAIL = 1376 383 | ER_BINLOG_PURGE_FATAL_ERR = 1377 384 | ER_LOG_IN_USE = 1378 385 | ER_LOG_PURGE_UNKNOWN_ERR = 1379 386 | ER_RELAY_LOG_INIT = 1380 387 | ER_NO_BINARY_LOGGING = 1381 388 | ER_RESERVED_SYNTAX = 1382 389 | ER_WSAS_FAILED = 1383 390 | ER_DIFF_GROUPS_PROC = 1384 391 | ER_NO_GROUP_FOR_PROC = 1385 392 | ER_ORDER_WITH_PROC = 1386 393 | ER_LOGGING_PROHIBIT_CHANGING_OF = 1387 394 | ER_NO_FILE_MAPPING = 1388 395 | ER_WRONG_MAGIC = 1389 396 | ER_PS_MANY_PARAM = 1390 397 | ER_KEY_PART_0 = 1391 398 | ER_VIEW_CHECKSUM = 1392 399 | ER_VIEW_MULTIUPDATE = 1393 400 | ER_VIEW_NO_INSERT_FIELD_LIST = 1394 401 | ER_VIEW_DELETE_MERGE_VIEW = 1395 402 | ER_CANNOT_USER = 1396 403 | ER_XAER_NOTA = 1397 404 | ER_XAER_INVAL = 1398 405 | ER_XAER_RMFAIL = 1399 406 | ER_XAER_OUTSIDE = 1400 407 | ER_XAER_RMERR = 1401 408 | ER_XA_RBROLLBACK = 1402 409 | ER_NONEXISTING_PROC_GRANT = 1403 410 | ER_PROC_AUTO_GRANT_FAIL = 1404 411 | ER_PROC_AUTO_REVOKE_FAIL = 1405 412 | ER_DATA_TOO_LONG = 1406 413 | ER_SP_BAD_SQLSTATE = 1407 414 | ER_STARTUP = 1408 415 | ER_LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409 416 | ER_CANT_CREATE_USER_WITH_GRANT = 1410 417 | ER_WRONG_VALUE_FOR_TYPE = 
1411 418 | ER_TABLE_DEF_CHANGED = 1412 419 | ER_SP_DUP_HANDLER = 1413 420 | ER_SP_NOT_VAR_ARG = 1414 421 | ER_SP_NO_RETSET = 1415 422 | ER_CANT_CREATE_GEOMETRY_OBJECT = 1416 423 | ER_FAILED_ROUTINE_BREAK_BINLOG = 1417 424 | ER_BINLOG_UNSAFE_ROUTINE = 1418 425 | ER_BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419 426 | ER_EXEC_STMT_WITH_OPEN_CURSOR = 1420 427 | ER_STMT_HAS_NO_OPEN_CURSOR = 1421 428 | ER_COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422 429 | ER_NO_DEFAULT_FOR_VIEW_FIELD = 1423 430 | ER_SP_NO_RECURSION = 1424 431 | ER_TOO_BIG_SCALE = 1425 432 | ER_TOO_BIG_PRECISION = 1426 433 | ER_M_BIGGER_THAN_D = 1427 434 | ER_WRONG_LOCK_OF_SYSTEM_TABLE = 1428 435 | ER_CONNECT_TO_FOREIGN_DATA_SOURCE = 1429 436 | ER_QUERY_ON_FOREIGN_DATA_SOURCE = 1430 437 | ER_FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431 438 | ER_FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432 439 | ER_FOREIGN_DATA_STRING_INVALID = 1433 440 | ER_CANT_CREATE_FEDERATED_TABLE = 1434 441 | ER_TRG_IN_WRONG_SCHEMA = 1435 442 | ER_STACK_OVERRUN_NEED_MORE = 1436 443 | ER_TOO_LONG_BODY = 1437 444 | ER_WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438 445 | ER_TOO_BIG_DISPLAYWIDTH = 1439 446 | ER_XAER_DUPID = 1440 447 | ER_DATETIME_FUNCTION_OVERFLOW = 1441 448 | ER_CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442 449 | ER_VIEW_PREVENT_UPDATE = 1443 450 | ER_PS_NO_RECURSION = 1444 451 | ER_SP_CANT_SET_AUTOCOMMIT = 1445 452 | ER_MALFORMED_DEFINER = 1446 453 | ER_VIEW_FRM_NO_USER = 1447 454 | ER_VIEW_OTHER_USER = 1448 455 | ER_NO_SUCH_USER = 1449 456 | ER_FORBID_SCHEMA_CHANGE = 1450 457 | ER_ROW_IS_REFERENCED_2 = 1451 458 | ER_NO_REFERENCED_ROW_2 = 1452 459 | ER_SP_BAD_VAR_SHADOW = 1453 460 | ER_TRG_NO_DEFINER = 1454 461 | ER_OLD_FILE_FORMAT = 1455 462 | ER_SP_RECURSION_LIMIT = 1456 463 | ER_SP_PROC_TABLE_CORRUPT = 1457 464 | ER_SP_WRONG_NAME = 1458 465 | ER_TABLE_NEEDS_UPGRADE = 1459 466 | ER_SP_NO_AGGREGATE = 1460 467 | ER_MAX_PREPARED_STMT_COUNT_REACHED = 1461 468 | ER_VIEW_RECURSIVE = 1462 469 | ER_NON_GROUPING_FIELD_USED = 1463 470 | ER_TABLE_CANT_HANDLE_SPKEYS = 1464 471 | ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465 472 | ER_REMOVED_SPACES = 1466 473 | ER_AUTOINC_READ_FAILED = 1467 474 | ER_USERNAME = 1468 475 | ER_HOSTNAME = 1469 476 | ER_WRONG_STRING_LENGTH = 1470 477 | ER_NON_INSERTABLE_TABLE = 1471 478 | ER_ADMIN_WRONG_MRG_TABLE = 1472 479 | ER_TOO_HIGH_LEVEL_OF_NESTING_FOR_SELECT = 1473 480 | ER_NAME_BECOMES_EMPTY = 1474 481 | ER_AMBIGUOUS_FIELD_TERM = 1475 482 | ER_FOREIGN_SERVER_EXISTS = 1476 483 | ER_FOREIGN_SERVER_DOESNT_EXIST = 1477 484 | ER_ILLEGAL_HA_CREATE_OPTION = 1478 485 | ER_PARTITION_REQUIRES_VALUES_ERROR = 1479 486 | ER_PARTITION_WRONG_VALUES_ERROR = 1480 487 | ER_PARTITION_MAXVALUE_ERROR = 1481 488 | ER_PARTITION_SUBPARTITION_ERROR = 1482 489 | ER_PARTITION_SUBPART_MIX_ERROR = 1483 490 | ER_PARTITION_WRONG_NO_PART_ERROR = 1484 491 | ER_PARTITION_WRONG_NO_SUBPART_ERROR = 1485 492 | ER_WRONG_EXPR_IN_PARTITION_FUNC_ERROR = 1486 493 | ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR = 1487 494 | ER_FIELD_NOT_FOUND_PART_ERROR = 1488 495 | ER_LIST_OF_FIELDS_ONLY_IN_HASH_ERROR = 1489 496 | ER_INCONSISTENT_PARTITION_INFO_ERROR = 1490 497 | ER_PARTITION_FUNC_NOT_ALLOWED_ERROR = 1491 498 | ER_PARTITIONS_MUST_BE_DEFINED_ERROR = 1492 499 | ER_RANGE_NOT_INCREASING_ERROR = 1493 500 | ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR = 1494 501 | ER_MULTIPLE_DEF_CONST_IN_LIST_PART_ERROR = 1495 502 | ER_PARTITION_ENTRY_ERROR = 1496 503 | ER_MIX_HANDLER_ERROR = 1497 504 | ER_PARTITION_NOT_DEFINED_ERROR = 1498 505 | ER_TOO_MANY_PARTITIONS_ERROR = 1499 506 | ER_SUBPARTITION_ERROR = 1500 507 | 
ER_CANT_CREATE_HANDLER_FILE = 1501 508 | ER_BLOB_FIELD_IN_PART_FUNC_ERROR = 1502 509 | ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF = 1503 510 | ER_NO_PARTS_ERROR = 1504 511 | ER_PARTITION_MGMT_ON_NONPARTITIONED = 1505 512 | ER_FOREIGN_KEY_ON_PARTITIONED = 1506 513 | ER_DROP_PARTITION_NON_EXISTENT = 1507 514 | ER_DROP_LAST_PARTITION = 1508 515 | ER_COALESCE_ONLY_ON_HASH_PARTITION = 1509 516 | ER_REORG_HASH_ONLY_ON_SAME_NO = 1510 517 | ER_REORG_NO_PARAM_ERROR = 1511 518 | ER_ONLY_ON_RANGE_LIST_PARTITION = 1512 519 | ER_ADD_PARTITION_SUBPART_ERROR = 1513 520 | ER_ADD_PARTITION_NO_NEW_PARTITION = 1514 521 | ER_COALESCE_PARTITION_NO_PARTITION = 1515 522 | ER_REORG_PARTITION_NOT_EXIST = 1516 523 | ER_SAME_NAME_PARTITION = 1517 524 | ER_NO_BINLOG_ERROR = 1518 525 | ER_CONSECUTIVE_REORG_PARTITIONS = 1519 526 | ER_REORG_OUTSIDE_RANGE = 1520 527 | ER_PARTITION_FUNCTION_FAILURE = 1521 528 | ER_PART_STATE_ERROR = 1522 529 | ER_LIMITED_PART_RANGE = 1523 530 | ER_PLUGIN_IS_NOT_LOADED = 1524 531 | ER_WRONG_VALUE = 1525 532 | ER_NO_PARTITION_FOR_GIVEN_VALUE = 1526 533 | ER_FILEGROUP_OPTION_ONLY_ONCE = 1527 534 | ER_CREATE_FILEGROUP_FAILED = 1528 535 | ER_DROP_FILEGROUP_FAILED = 1529 536 | ER_TABLESPACE_AUTO_EXTEND_ERROR = 1530 537 | ER_WRONG_SIZE_NUMBER = 1531 538 | ER_SIZE_OVERFLOW_ERROR = 1532 539 | ER_ALTER_FILEGROUP_FAILED = 1533 540 | ER_BINLOG_ROW_LOGGING_FAILED = 1534 541 | ER_BINLOG_ROW_WRONG_TABLE_DEF = 1535 542 | ER_BINLOG_ROW_RBR_TO_SBR = 1536 543 | ER_EVENT_ALREADY_EXISTS = 1537 544 | ER_EVENT_STORE_FAILED = 1538 545 | ER_EVENT_DOES_NOT_EXIST = 1539 546 | ER_EVENT_CANT_ALTER = 1540 547 | ER_EVENT_DROP_FAILED = 1541 548 | ER_EVENT_INTERVAL_NOT_POSITIVE_OR_TOO_BIG = 1542 549 | ER_EVENT_ENDS_BEFORE_STARTS = 1543 550 | ER_EVENT_EXEC_TIME_IN_THE_PAST = 1544 551 | ER_EVENT_OPEN_TABLE_FAILED = 1545 552 | ER_EVENT_NEITHER_M_EXPR_NOR_M_AT = 1546 553 | ER_COL_COUNT_DOESNT_MATCH_CORRUPTED = 1547 554 | ER_CANNOT_LOAD_FROM_TABLE = 1548 555 | ER_EVENT_CANNOT_DELETE = 1549 556 | ER_EVENT_COMPILE_ERROR = 1550 557 | ER_EVENT_SAME_NAME = 1551 558 | ER_EVENT_DATA_TOO_LONG = 1552 559 | ER_DROP_INDEX_FK = 1553 560 | ER_WARN_DEPRECATED_SYNTAX_WITH_VER = 1554 561 | ER_CANT_WRITE_LOCK_LOG_TABLE = 1555 562 | ER_CANT_LOCK_LOG_TABLE = 1556 563 | ER_FOREIGN_DUPLICATE_KEY = 1557 564 | ER_COL_COUNT_DOESNT_MATCH_PLEASE_UPDATE = 1558 565 | ER_TEMP_TABLE_PREVENTS_SWITCH_OUT_OF_RBR = 1559 566 | ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1560 567 | ER_NDB_CANT_SWITCH_BINLOG_FORMAT = 1561 568 | ER_PARTITION_NO_TEMPORARY = 1562 569 | ER_PARTITION_CONST_DOMAIN_ERROR = 1563 570 | ER_PARTITION_FUNCTION_IS_NOT_ALLOWED = 1564 571 | ER_DDL_LOG_ERROR = 1565 572 | ER_NULL_IN_VALUES_LESS_THAN = 1566 573 | ER_WRONG_PARTITION_NAME = 1567 574 | ER_CANT_CHANGE_TX_ISOLATION = 1568 575 | ER_DUP_ENTRY_AUTOINCREMENT_CASE = 1569 576 | ER_EVENT_MODIFY_QUEUE_ERROR = 1570 577 | ER_EVENT_SET_VAR_ERROR = 1571 578 | ER_PARTITION_MERGE_ERROR = 1572 579 | ER_CANT_ACTIVATE_LOG = 1573 580 | ER_RBR_NOT_AVAILABLE = 1574 581 | ER_BASE64_DECODE_ERROR = 1575 582 | ER_EVENT_RECURSION_FORBIDDEN = 1576 583 | ER_EVENTS_DB_ERROR = 1577 584 | ER_ONLY_INTEGERS_ALLOWED = 1578 585 | ER_UNSUPORTED_LOG_ENGINE = 1579 586 | ER_BAD_LOG_STATEMENT = 1580 587 | ER_CANT_RENAME_LOG_TABLE = 1581 588 | ER_WRONG_PARAMCOUNT_TO_NATIVE_FCT = 1582 589 | ER_WRONG_PARAMETERS_TO_NATIVE_FCT = 1583 590 | ER_WRONG_PARAMETERS_TO_STORED_FCT = 1584 591 | ER_NATIVE_FCT_NAME_COLLISION = 1585 592 | ER_DUP_ENTRY_WITH_KEY_NAME = 1586 593 | ER_BINLOG_PURGE_EMFILE = 1587 594 | 
ER_EVENT_CANNOT_CREATE_IN_THE_PAST = 1588 595 | ER_EVENT_CANNOT_ALTER_IN_THE_PAST = 1589 596 | ER_SLAVE_INCIDENT = 1590 597 | ER_NO_PARTITION_FOR_GIVEN_VALUE_SILENT = 1591 598 | ER_BINLOG_UNSAFE_STATEMENT = 1592 599 | ER_SLAVE_FATAL_ERROR = 1593 600 | ER_SLAVE_RELAY_LOG_READ_FAILURE = 1594 601 | ER_SLAVE_RELAY_LOG_WRITE_FAILURE = 1595 602 | ER_SLAVE_CREATE_EVENT_FAILURE = 1596 603 | ER_SLAVE_MASTER_COM_FAILURE = 1597 604 | ER_BINLOG_LOGGING_IMPOSSIBLE = 1598 605 | ER_VIEW_NO_CREATION_CTX = 1599 606 | ER_VIEW_INVALID_CREATION_CTX = 1600 607 | ER_SR_INVALID_CREATION_CTX = 1601 608 | ER_TRG_CORRUPTED_FILE = 1602 609 | ER_TRG_NO_CREATION_CTX = 1603 610 | ER_TRG_INVALID_CREATION_CTX = 1604 611 | ER_EVENT_INVALID_CREATION_CTX = 1605 612 | ER_TRG_CANT_OPEN_TABLE = 1606 613 | ER_CANT_CREATE_SROUTINE = 1607 614 | ER_NEVER_USED = 1608 615 | ER_NO_FORMAT_DESCRIPTION_EVENT_BEFORE_BINLOG_STATEMENT = 1609 616 | ER_SLAVE_CORRUPT_EVENT = 1610 617 | ER_LOAD_DATA_INVALID_COLUMN = 1611 618 | ER_LOG_PURGE_NO_FILE = 1612 619 | ER_XA_RBTIMEOUT = 1613 620 | ER_XA_RBDEADLOCK = 1614 621 | ER_NEED_REPREPARE = 1615 622 | ER_DELAYED_NOT_SUPPORTED = 1616 623 | WARN_NO_MASTER_INFO = 1617 624 | WARN_OPTION_IGNORED = 1618 625 | WARN_PLUGIN_DELETE_BUILTIN = 1619 626 | WARN_PLUGIN_BUSY = 1620 627 | ER_VARIABLE_IS_READONLY = 1621 628 | ER_WARN_ENGINE_TRANSACTION_ROLLBACK = 1622 629 | ER_SLAVE_HEARTBEAT_FAILURE = 1623 630 | ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE = 1624 631 | ER_NDB_REPLICATION_SCHEMA_ERROR = 1625 632 | ER_CONFLICT_FN_PARSE_ERROR = 1626 633 | ER_EXCEPTIONS_WRITE_ERROR = 1627 634 | ER_TOO_LONG_TABLE_COMMENT = 1628 635 | ER_TOO_LONG_FIELD_COMMENT = 1629 636 | ER_FUNC_INEXISTENT_NAME_COLLISION = 1630 637 | ER_DATABASE_NAME = 1631 638 | ER_TABLE_NAME = 1632 639 | ER_PARTITION_NAME = 1633 640 | ER_SUBPARTITION_NAME = 1634 641 | ER_TEMPORARY_NAME = 1635 642 | ER_RENAMED_NAME = 1636 643 | ER_TOO_MANY_CONCURRENT_TRXS = 1637 644 | WARN_NON_ASCII_SEPARATOR_NOT_IMPLEMENTED = 1638 645 | ER_DEBUG_SYNC_TIMEOUT = 1639 646 | ER_DEBUG_SYNC_HIT_LIMIT = 1640 647 | ER_DUP_SIGNAL_SET = 1641 648 | ER_SIGNAL_WARN = 1642 649 | ER_SIGNAL_NOT_FOUND = 1643 650 | ER_SIGNAL_EXCEPTION = 1644 651 | ER_RESIGNAL_WITHOUT_ACTIVE_HANDLER = 1645 652 | ER_SIGNAL_BAD_CONDITION_TYPE = 1646 653 | WARN_COND_ITEM_TRUNCATED = 1647 654 | ER_COND_ITEM_TOO_LONG = 1648 655 | ER_UNKNOWN_LOCALE = 1649 656 | ER_SLAVE_IGNORE_SERVER_IDS = 1650 657 | ER_QUERY_CACHE_DISABLED = 1651 658 | ER_SAME_NAME_PARTITION_FIELD = 1652 659 | ER_PARTITION_COLUMN_LIST_ERROR = 1653 660 | ER_WRONG_TYPE_COLUMN_VALUE_ERROR = 1654 661 | ER_TOO_MANY_PARTITION_FUNC_FIELDS_ERROR = 1655 662 | ER_MAXVALUE_IN_VALUES_IN = 1656 663 | ER_TOO_MANY_VALUES_ERROR = 1657 664 | ER_ROW_SINGLE_PARTITION_FIELD_ERROR = 1658 665 | ER_FIELD_TYPE_NOT_ALLOWED_AS_PARTITION_FIELD = 1659 666 | ER_PARTITION_FIELDS_TOO_LONG = 1660 667 | ER_BINLOG_ROW_ENGINE_AND_STMT_ENGINE = 1661 668 | ER_BINLOG_ROW_MODE_AND_STMT_ENGINE = 1662 669 | ER_BINLOG_UNSAFE_AND_STMT_ENGINE = 1663 670 | ER_BINLOG_ROW_INJECTION_AND_STMT_ENGINE = 1664 671 | ER_BINLOG_STMT_MODE_AND_ROW_ENGINE = 1665 672 | ER_BINLOG_ROW_INJECTION_AND_STMT_MODE = 1666 673 | ER_BINLOG_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE = 1667 674 | ER_BINLOG_UNSAFE_LIMIT = 1668 675 | ER_BINLOG_UNSAFE_INSERT_DELAYED = 1669 676 | ER_BINLOG_UNSAFE_SYSTEM_TABLE = 1670 677 | ER_BINLOG_UNSAFE_AUTOINC_COLUMNS = 1671 678 | ER_BINLOG_UNSAFE_UDF = 1672 679 | ER_BINLOG_UNSAFE_SYSTEM_VARIABLE = 1673 680 | ER_BINLOG_UNSAFE_SYSTEM_FUNCTION = 1674 681 | 
ER_BINLOG_UNSAFE_NONTRANS_AFTER_TRANS = 1675 682 | ER_MESSAGE_AND_STATEMENT = 1676 683 | ER_SLAVE_CONVERSION_FAILED = 1677 684 | ER_SLAVE_CANT_CREATE_CONVERSION = 1678 685 | ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_FORMAT = 1679 686 | ER_PATH_LENGTH = 1680 687 | ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT = 1681 688 | ER_WRONG_NATIVE_TABLE_STRUCTURE = 1682 689 | ER_WRONG_PERFSCHEMA_USAGE = 1683 690 | ER_WARN_I_S_SKIPPED_TABLE = 1684 691 | ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1685 692 | ER_STORED_FUNCTION_PREVENTS_SWITCH_BINLOG_DIRECT = 1686 693 | ER_SPATIAL_MUST_HAVE_GEOM_COL = 1687 694 | ER_TOO_LONG_INDEX_COMMENT = 1688 695 | ER_LOCK_ABORTED = 1689 696 | ER_DATA_OUT_OF_RANGE = 1690 697 | ER_WRONG_SPVAR_TYPE_IN_LIMIT = 1691 698 | ER_BINLOG_UNSAFE_MULTIPLE_ENGINES_AND_SELF_LOGGING_ENGINE = 1692 699 | ER_BINLOG_UNSAFE_MIXED_STATEMENT = 1693 700 | ER_INSIDE_TRANSACTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1694 701 | ER_STORED_FUNCTION_PREVENTS_SWITCH_SQL_LOG_BIN = 1695 702 | ER_FAILED_READ_FROM_PAR_FILE = 1696 703 | ER_VALUES_IS_NOT_INT_TYPE_ERROR = 1697 704 | ER_ACCESS_DENIED_NO_PASSWORD_ERROR = 1698 705 | ER_SET_PASSWORD_AUTH_PLUGIN = 1699 706 | ER_GRANT_PLUGIN_USER_EXISTS = 1700 707 | ER_TRUNCATE_ILLEGAL_FK = 1701 708 | ER_PLUGIN_IS_PERMANENT = 1702 709 | ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN = 1703 710 | ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX = 1704 711 | ER_MEMSQL_FAILED_CODE_GEN = 1705 712 | ER_MEMSQL_FEATURE_LOCKDOWN = 1706 713 | ER_MEMSQL_FAILED_PARAMETERIZE = 1707 714 | ER_MEMSQL_AGG_NYA = 1708 715 | ER_MEMSQL_UNUSED_1 = 1709 716 | ER_MEMSQL_HASH_INDEX_NOT_UNIQUE = 1710 717 | ER_MEMSQL_INDEX_NOT_SUPPORTED = 1711 718 | ER_MEMSQL_OOM = 1712 719 | ER_MEMSQL_JOIN_NYA = 1713 720 | ER_MEMSQL_MAX_CON_TOO_HIGH = 1714 721 | ER_MEMSQL_SYNTAX_CHECK_OK = 1715 722 | ER_MEMSQL_NOT_ALLOWED = 1716 723 | ER_MEMSQL_FEATURE_DISABLED = 1717 724 | ER_MEMSQL_REPL_DB_CREATE_OR_DROP = 1718 725 | ER_MEMSQL_WRITE_QUERY_DURING_ALTER_TABLE = 1719 726 | ER_TABLE_MEMORY_LIMIT = 1720 727 | ER_SET_MEMORY_LIMIT = 1721 728 | ER_MEMSQL_INVALID_BUCKET_COUNT = 1722 729 | ER_MEMSQL_BUCKET_COUNT_WITHOUT_HASH = 1723 730 | ER_SET_MAX_MEMORY_LIMIT = 1724 731 | ER_MEMSQL_USERS_TABLE = 1725 732 | ER_CREATE_TABLE_MEMSQL = 1726 733 | ER_DEV_EDITION_MEM_LIMIT = 1727 734 | ER_MEMSQL_SYNC_ERROR = 1728 735 | ER_LOADING_SO = 1729 736 | ER_DB_NOT_ONLINE = 1730 737 | ER_DB_NOT_RECOVERED = 1731 738 | ER_INVALID_STATE = 1732 739 | ER_UNRECOVERABLE = 1733 740 | ER_DURABILITY_DISABLED = 1734 741 | ER_CANNOT_CONNECT_TO_LEAF = 1735 742 | ER_DISTRIBUTED_NOT_AGGREGATOR = 1736 743 | ER_DISTRIBUTED_LEAF_EXISTS = 1737 744 | ER_DISTRIBUTED_LEAF_DOESNT_EXIST = 1738 745 | ER_DISTRIBUTED_LEAF_CANT_CREATE_DB = 1739 746 | ER_DISTRIBUTED_LEAF_INVALID_RANGE = 1740 747 | ER_DISTRIBUTED_QUERY_SHARDED_TABLE_FROM_LEAF = 1741 748 | ER_DISTRIBUTED_REPLICATE_REFERENCE_DATABASE_FAILED = 1742 749 | ER_TOO_MANY_REPLICATION_TARGETS = 1743 750 | ER_MEMSQL_UNIQUE_KEY_SHARD_KEY = 1744 751 | ER_MEMSQL_UNMAPPED_PARTITION = 1745 752 | ER_DISTRIBUTED_UNCLASSIFIED_ERROR = 1746 753 | ER_DISTRIBUTED_NO_LEAVES = 1747 754 | ER_DISTRIBUTED_NO_LEAVES_CREATE_DATABASE = 1748 755 | ER_DISTRIBUTED_FEATURE_LOCKDOWN = 1749 756 | ER_DISTRIBUTED_PARTITIONS_POWER_OF_TWO = 1750 757 | ER_DISTRIBUTED_AMBIGUOUS_PARTITION_KEY = 1751 758 | ER_DISTRIBUTED_NOT_MASTER = 1752 759 | ER_DISTRIBUTED_MASTER = 1753 760 | ER_DISTRIBUTED_UNKNOWN_AGGREGATOR_COMMAND = 1754 761 | ER_DISTRIBUTED_DUMMY3 = 1755 762 | ER_DISTRIBUTED_BIGINT_AUTO_INCREMENT = 1756 763 | 
ER_DISTRIBUTED_TABLE_NOT_SHARDED = 1757 764 | ER_DISTRIBUTED_LAST_LEAF = 1758 765 | ER_DISTRIBUTED_REFERENCE_TABLES_ON_SINGLE_BOX = 1759 766 | ER_CANNOT_CREATE_REFERENCE_TABLES_ON_LEAF = 1760 767 | ER_DISTRIBUTED_MASTER_IS_ALIVE = 1761 768 | ER_REPL_MEMSQL_DB = 1762 769 | ER_REPL_ONLINE_DB = 1763 770 | ER_UNUSED_ERROR_MESSAGE = 1764 771 | ER_TABLE_LOCK_FAILURE = 1765 772 | ER_DB_QUERY_OFFLINE = 1766 773 | ER_DB_QUERY_PROVISIONING = 1767 774 | ER_DB_QUERY_CORRUPTED = 1768 775 | ER_REPL_ALREADY_REPLICATING = 1769 776 | ER_DISTRIBUTED_QUERY_PARALLELISM_IGNORED = 1770 777 | ER_DISTRIBUTED_LEAF_STATUS_UNRECOGNIZED_FIELD = 1771 778 | ER_DISTRIBUTED_PARTITION_MOVE_FAILED = 1772 779 | ER_DISTRIBUTED_PARTITION_ORDINAL_TOO_BIG = 1773 780 | ER_DISTRIBUTED_INSTANCE_ON_NODE_ALREADY_PRESENT = 1774 781 | ER_DISTRIBUTED_CHECK_PARTITIONS_FAILURE = 1775 782 | ER_DISTRIBUTED_INSTANCE_IS_MASTER = 1776 783 | ER_DISTRIBUTED_PARTITION_HAS_NO_INSTANCES = 1777 784 | ER_DISTRIBUTED_PARTITION_NAME_TOO_LONG = 1778 785 | ER_DISTRIBUTED_INVALID_GROUP_ID = 1779 786 | ER_DISTRIBUTED_LEAF_LOST_PAIR = 1780 787 | ER_DISTRIBUTED_LEAF_HAS_NO_PAIR_REORGANIZE = 1781 788 | ER_DISTRIBUTED_GROUPS_UNBALANCED = 1782 789 | ER_DISTRIBUTED_INVALID_REDUNDANCY_LEVEL = 1783 790 | ER_DISTRIBUTED_GROUP_NOT_EMPTY = 1784 791 | ER_DISTRIBUTED_REBALANCE_WARNING = 1785 792 | ER_DISTRIBUTED_LEAF_COUNT_LIMIT = 1786 793 | ER_DISTRIBUTED_CONCURRENT_NODE_REMOVAL = 1787 794 | ER_DROP_DURING_RESTORE = 1788 795 | ER_INVALID_BACKUP_PATH = 1789 796 | ER_INVALID_RESTORE_PATH = 1790 797 | ER_RESTORE_OUT_OF_DISK = 1791 798 | ER_BACKUP_IO_ERROR = 1792 799 | ER_CANNOT_CONTINUE_REPLICATING = 1793 800 | ER_UNKNOWN_EXPLAIN_FORMAT = 1794 801 | ER_DISTRIBUTED_DATABASE_NOT_SHARDED = 1795 802 | ER_DISTRIBUTED_CONCURRENT_REBALANCE = 1796 803 | ER_RECOVERY_IN_PROGRESS = 1797 804 | ER_DISTRIBUTED_AGGREGATOR_LEAF = 1798 805 | ER_DISTRIBUTED_MASTER_AGGREGATOR_AS_LEAF = 1799 806 | ER_DISTRIBUTED_MULTIPLE_FOREIGN_KEYS = 1800 807 | ER_DISTRIBUTED_FOREIGN_KEY_SHARD_KEY = 1801 808 | ER_DISTRIBUTED_LEAF_IS_OFFLINE = 1802 809 | ER_DISTRIBUTED_LEAF_ALREADY_ONLINE = 1803 810 | ER_DISTRIBUTED_LEAF_NOT_ONLINE = 1804 811 | ER_MULTIPLE_SHARD_KEYS = 1805 812 | ER_CONCURRENT_LOAD_FILE = 1806 813 | ER_DISTRIBUTED_USER_PERMISSIONS = 1807 814 | ER_DISTRIBUTED_LEAF_USER_PERMISSIONS = 1808 815 | ER_DISTRIBUTED_OUT_OF_CAPACITY = 1809 816 | ER_UNUSED = 1810 817 | ER_DISTRIBUTED_ATTACH_LEAF_FAILED = 1811 818 | ER_MEMSQL_READ_QUERY_DURING_ALTER_TABLE = 1812 819 | ER_MEMSQL_ONLINE_ALTER_IMPOSSIBLE = 1813 820 | ER_DISTRIBUTED_USER_BAD_HOST = 1814 821 | ER_DISTRIBUTED_USER_DELETE = 1815 822 | ER_FAILED_FOREIGN_KEY_CONSTRAINT = 1816 823 | ER_FOREIGN_KEY_INDEX = 1817 824 | ER_FOREIGN_KEY_TYPE_MISMATCH = 1818 825 | ER_FOREIGN_KEY_GENERIC_ERROR = 1819 826 | ER_MEMSQL_COMPUTED_DEPEND_ON_NON_EXISTING = 1820 827 | ER_MEMSQL_COMPUTED_DEPENDENCY_LOOP = 1821 828 | ER_MEMSQL_COMPUTED_INSERT = 1822 829 | ER_MEMSQL_COMPUTED_UPDATE = 1823 830 | ER_MEMSQL_COMPUTED_DEPEND_ON_TIME = 1824 831 | ER_MEMSQL_COMPUTED_IS_TIMESTAMP = 1825 832 | ER_MEMSQL_COMPUTED_EXPR_TOO_LONG = 1826 833 | ER_MEMSQL_INVALID_LEAF_SIGNAL = 1827 834 | ER_WRONG_KEY_COLUMN_JSON = 1828 835 | ER_UNSUPPORTED_AUTH_PLUGIN = 1829 836 | ER_ERROR_LAST = 1829 837 | 838 | def lookup_by_number(errno): 839 | """ Used for development only """ 840 | for key, val in globals().items(): 841 | if errno == val: 842 | print(key) 843 | -------------------------------------------------------------------------------- /memsql/common/exceptions.py: 
--------------------------------------------------------------------------------
1 | 
2 | class NotConnected(Exception):
3 |     pass
4 | 
5 | class RequiresDatabase(Exception):
6 |     pass
7 | 
--------------------------------------------------------------------------------
/memsql/common/json.py:
--------------------------------------------------------------------------------
 1 | import simplejson
 2 | 
 3 | def simplejson_datetime_serializer(obj):
 4 |     """
 5 |     Designed to be passed as the default kwarg in simplejson.dumps. Serializes dates and datetimes to ISO strings.
 6 |     """
 7 |     if hasattr(obj, 'isoformat'):
 8 |         return obj.isoformat()
 9 |     else:
10 |         raise TypeError('Object of type %s with value of %s is not JSON serializable' % (type(obj), repr(obj)))
11 | 
12 | def _set_defaults(kwargs):
13 |     kwargs.setdefault('separators', (',', ':'))
14 |     kwargs.setdefault('default', simplejson_datetime_serializer)
15 |     return kwargs
16 | 
17 | def dumps(data, **kwargs):
18 |     return simplejson.dumps(data, **_set_defaults(kwargs))
19 | 
20 | def loads(data, **kwargs):
21 |     return simplejson.loads(data, **kwargs)
22 | 
--------------------------------------------------------------------------------
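A minimal sketch of how this json wrapper behaves (the names and values are illustrative; the compact separators and the datetime default defined above are applied automatically):

    import datetime
    from memsql.common import json

    payload = { 'id': 1, 'at': datetime.datetime(2020, 1, 2, 3, 4, 5) }
    json.dumps(payload)   # -> '{"id":1,"at":"2020-01-02T03:04:05"}' (key order follows the dict)
    # objects with no isoformat() attribute still raise TypeError, e.g. json.dumps({'x': object()})
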
/memsql/common/query_builder.py:
--------------------------------------------------------------------------------
 1 | def simple_expression(joiner=', ', **fields):
 2 |     """ Build a simple expression ready to be added onto another query.
 3 | 
 4 |     >>> simple_expression(joiner=' AND ', name='bob', role='admin')
 5 |     "`name`=%(_QB_name)s AND `role`=%(_QB_role)s", { '_QB_name': 'bob', '_QB_role': 'admin' }
 6 |     """
 7 |     expression, params = [], {}
 8 | 
 9 |     for field_name, value in sorted(fields.items(), key=lambda kv: kv[0]):
10 |         key = '_QB_%s' % field_name
11 |         expression.append('`%s`=%%(%s)s' % (field_name, key))
12 |         params[key] = value
13 | 
14 |     return joiner.join(expression), params
15 | 
16 | def update(table_name, **fields):
17 |     """ Build an update query.
18 | 
19 |     >>> update('foo_table', a=5, b=2)
20 |     "UPDATE `foo_table` SET `a`=%(_QB_a)s, `b`=%(_QB_b)s", { '_QB_a': 5, '_QB_b': 2 }
21 |     """
22 |     prefix = "UPDATE `%s` SET " % table_name
23 |     sets, params = simple_expression(', ', **fields)
24 |     return prefix + sets, params
25 | 
26 | def multi_insert(table_name, *rows):
27 |     """ Build a multi-insert query.
28 |     Each row in rows should be a dict of { column_name: column_value }
29 | 
30 |     >>> multi_insert('foo_table', { 'a': 5, 'b': 2 }, { 'a': 5, 'b': 2 })
31 |     "INSERT INTO `foo_table` (`a`, `b`) VALUES (%(_QB_ROW_0)s), (%(_QB_ROW_1)s)", { '_QB_ROW_0': [5, 2], '_QB_ROW_1': [5, 2] }
32 |     """
33 |     return __multi_insert(table_name, rows)
34 | 
35 | def multi_replace(table_name, *rows):
36 |     """ Build a multi-replace query.
37 |     Each row in rows should be a dict of { column_name: column_value }
38 | 
39 |     >>> multi_replace('foo_table', { 'a': 5, 'b': 2 }, { 'a': 5, 'b': 2 })
40 |     "REPLACE INTO `foo_table` (`a`, `b`) VALUES (%(_QB_ROW_0)s), (%(_QB_ROW_1)s)", { '_QB_ROW_0': [5, 2], '_QB_ROW_1': [5, 2] }
41 |     """
42 |     return __multi_insert(table_name, rows, replace=True)
43 | 
44 | def __multi_insert(table_name, rows, replace=False):
45 |     cols = sorted(rows[0].keys())
46 |     prefix = '%s INTO `%s` (%s) VALUES ' % (
47 |         'REPLACE' if replace else 'INSERT',
48 |         table_name,
49 |         ', '.join(['`%s`' % col for col in cols])
50 |     )
51 |     sql, params = [], {}
52 | 
53 |     for i, row in enumerate(rows):
54 |         key = '_QB_ROW_%d' % i
55 |         params[key] = [ v for c, v in sorted(row.items(), key=lambda kv: cols.index(kv[0])) ]
56 |         sql.append('(%%(%s)s)' % key)
57 | 
58 |     return prefix + ', '.join(sql), params
59 | 
--------------------------------------------------------------------------------
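A short sketch of how these builders compose (table and column names are illustrative; escape_query is the helper from memsql/common/database.py, used the same way in the tests further down):

    from memsql.common import query_builder, database

    sql, params = query_builder.update('users', name='bob')
    where_sql, where_params = query_builder.simple_expression(' AND ', id=42)
    params.update(where_params)  # safe here: the two parts bind different field names
    print(database.escape_query(sql + ' WHERE ' + where_sql, params))
    # -> UPDATE `users` SET `name`='bob' WHERE `id`=42

Note that both helpers derive parameter keys from field names (_QB_<field>), so combining two expressions that reference the same field would collide; distinct fields, as here, are fine.
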
""" 68 | with self._lock: 69 | if self._aggregator: 70 | try: 71 | return self._pool_connect(self._aggregator) 72 | except PoolConnectionException: 73 | self._aggregator = None 74 | 75 | if not len(self._aggregators): 76 | with self._pool_connect(self._primary_aggregator) as conn: 77 | self._update_aggregator_list(conn) 78 | conn.expire() 79 | 80 | random.shuffle(self._aggregators) 81 | 82 | last_exception = None 83 | for aggregator in self._aggregators: 84 | self.logger.debug('Attempting connection with %s:%s' % (aggregator[0], aggregator[1])) 85 | 86 | try: 87 | conn = self._pool_connect(aggregator) 88 | # connection successful! 89 | self._aggregator = aggregator 90 | return conn 91 | except PoolConnectionException as e: 92 | # connection error 93 | last_exception = e 94 | else: 95 | # bad news bears... try again later 96 | self._aggregator = None 97 | self._aggregators = [] 98 | 99 | raise last_exception 100 | 101 | def _update_aggregator_list(self, conn): 102 | try: 103 | rows = conn.query('SHOW AGGREGATORS') 104 | except DatabaseError as e: 105 | if e.args[0] == errorcodes.ER_DISTRIBUTED_NOT_AGGREGATOR: 106 | # connected to memsql singlebox 107 | self._aggregators = [self._primary_aggregator] 108 | self._master_aggregator = self._primary_aggregator 109 | else: 110 | raise 111 | else: 112 | with self._lock: 113 | self._aggregators = [] 114 | for row in rows: 115 | if row.Host == '127.0.0.1': 116 | # this is the aggregator we are connecting to 117 | row['Host'] = conn.connection_info()[0] 118 | if int(row.Master_Aggregator) == 1: 119 | self._master_aggregator = (row.Host, row.Port) 120 | self._aggregators.append((row.Host, row.Port)) 121 | 122 | assert len(self._aggregators) > 0, "Failed to retrieve a list of aggregators" 123 | 124 | self.logger.debug('Aggregator list is updated to %s. Current aggregator is %s.' 
/memsql/common/test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/memsql/memsql-python/4334c6291cb2294c6e52763ac14628080e9ba601/memsql/common/test/__init__.py -------------------------------------------------------------------------------- /memsql/common/test/conftest.py: -------------------------------------------------------------------------------- 1 | import os 2 | import pytest 3 | from memsql.common import database 4 | 5 | host = os.environ.get('MEMSQL_PYTHON_TEST_HOST', '127.0.0.1') 6 | 7 | @pytest.fixture(scope="module") 8 | def test_db_database(): 9 | return "memsql_python_tests" 10 | 11 | @pytest.fixture(scope="module") 12 | def test_db_args(): 13 | return { 14 | "host": host, 15 | "port": 3306, 16 | "user": 'root', 17 | "password": 'mysql' 18 | } 19 | 20 | @pytest.fixture 21 | def test_db_conn(test_db_args): 22 | return database.connect(**test_db_args) 23 | -------------------------------------------------------------------------------- /memsql/common/test/test_connection_pool.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import mock 3 | import multiprocessing 4 | 5 | from memsql.common.test.test_database_adapters import TestQueries 6 | 7 | @pytest.fixture 8 | def test_key(test_db_args): 9 | return (test_db_args['host'], 10 | test_db_args['port'], 11 | test_db_args['user'], 12 | test_db_args['password'], 13 | "information_schema", 14 | None, 15 | multiprocessing.current_process().pid) 16 | 17 | @pytest.fixture 18 | def pool(): 19 | from memsql.common.connection_pool import ConnectionPool 20 | return ConnectionPool() 21 | 22 | @pytest.fixture 23 | def db_args(test_key): 24 | return test_key[:-2] 25 | 26 | @pytest.fixture 27 | def fairy(pool, db_args): 28 | return pool.connect(*db_args) 29 | 30 | def test_checkout(pool, test_key, db_args): 31 | fairy = pool.connect(*db_args) 32 | 33 | assert fairy._key == test_key 34 | 35 | assert len(pool._connections) == 1 36 | assert list(pool._connections.values())[0].qsize() == 0 37 | assert len(pool._fairies) == 1 38 | 39 | def test_checkout_options(pool, db_args): 40 | from memsql.common.connection_pool import PoolConnectionException 41 | args = ("example.com",) + db_args[1:5] + ({ "connect_timeout": 1 },) 42 | with pytest.raises(PoolConnectionException): 43 | pool.connect(*args) 44 | 45 | def test_checkin(pool, fairy): 46 | pool.checkin(fairy, fairy._key, fairy._conn) 47 | 48 | assert len(pool._fairies) == 0 49 | assert len(pool._connections) == 1 50 | assert list(pool._connections.values())[0].qsize() == 1 51 | 52 | def test_connection_reuse(pool, test_key, db_args): 53 | fairy = pool.connect(*db_args) 54 | db_conn = fairy._conn 55 | fairy.close() 56 | assert len(pool._connections) == 1 57 | assert list(pool._connections.values())[0].qsize() == 1 58 | 59 | fairy = pool.connect(*db_args) 60 | assert fairy._conn == db_conn 61 | fairy.close() 62 | assert len(pool._connections) == 1 63 | assert list(pool._connections.values())[0].qsize() == 1 64 | 65 | def test_cant_checkout_old_fairy_after_restart(pool, test_key, db_args): 66 | fairy = pool.connect(*db_args) 67 | assert pool._current_version == 0 68 | 69 | db_conn = fairy._conn 70 | assert db_conn._version == 0 71 | fairy.close() 72 | 73 | pool.rolling_restart() 74 | assert pool._current_version == 1 75 | assert
len(pool._connections) == 1 76 | assert list(pool._connections.values())[0].qsize() == 1 77 | 78 | fairy = pool.connect(*db_args) 79 | assert fairy._conn != db_conn 80 | assert fairy._conn._version == 1 81 | 82 | fairy.close() 83 | assert len(pool._connections) == 1 84 | assert list(pool._connections.values())[0].qsize() == 1 85 | 86 | def test_cant_checkin_old_fairy_after_restart(pool, test_key, db_args): 87 | fairy = pool.connect(*db_args) 88 | assert pool._current_version == 0 89 | 90 | db_conn = fairy._conn 91 | assert db_conn._version == 0 92 | 93 | pool.rolling_restart() 94 | assert pool._current_version == 1 95 | 96 | fairy.close() 97 | 98 | assert len(pool._connections) == 1 99 | assert list(pool._connections.values())[0].qsize() == 0 100 | 101 | def test_connection_invalidation(pool, test_key, db_args): 102 | fairy = pool.connect(*db_args) 103 | db_conn = fairy._conn 104 | r = fairy.query('SELECT 1') 105 | assert r[0]['1'] == 1 106 | fairy.close() 107 | db_conn.close() 108 | fairy = pool.connect(*db_args) 109 | # We should not have used the same db connection because we should have 110 | # detected that it was closed. 111 | assert fairy._conn != db_conn 112 | r = fairy.query('SELECT 1') 113 | assert r[0]['1'] == 1 114 | fairy.close() 115 | 116 | def test_connection_close(pool, db_args): 117 | fairy = pool.connect(*db_args) 118 | fairy2 = pool.connect(*db_args) 119 | assert fairy 120 | assert fairy2 121 | pool.close() 122 | 123 | assert len(pool._fairies) == 0 124 | for queue in pool._connections.values(): 125 | assert queue.qsize() == 0 126 | 127 | def test_connection_info(fairy, db_args): 128 | conn_info = fairy.connection_info() 129 | assert len(conn_info) == 2 130 | assert conn_info[0] == db_args[0] 131 | assert conn_info[1] == db_args[1] 132 | 133 | def test_fairy_expire(pool, test_key, db_args): 134 | fairy = pool.connect(*db_args) 135 | 136 | fairy.expire() 137 | fairy.close() 138 | assert len(pool._fairies) == 0 139 | assert len(pool._connections) == 1 140 | assert list(pool._connections.values())[0].qsize() == 0 141 | 142 | def test_fairy_reconnect(fairy): 143 | assert fairy.connected() 144 | fairy.reconnect() 145 | assert fairy.connected() 146 | 147 | @pytest.fixture() 148 | def _fairy_queries_fixture(request, fairy, test_db_args, test_db_database): 149 | test_queries = TestQueries() 150 | conn = test_queries._x_conn(request, test_db_args, test_db_database) 151 | test_queries._ensure_schema(conn, request) 152 | 153 | def test_fairy_queries(fairy, _fairy_queries_fixture, test_db_database): 154 | test_queries = TestQueries() 155 | fairy.select_db(test_db_database) 156 | 157 | for attr in dir(test_queries): 158 | if attr.startswith('test_') and "bytes" not in attr: 159 | getattr(test_queries, attr)(fairy) 160 | fairy.execute('DELETE FROM x') 161 | 162 | def test_fairy_query(fairy): 163 | r = fairy.query('SELECT 1') 164 | assert r[0]['1'] == 1 165 | 166 | def test_fairy_get(fairy): 167 | r = fairy.get('SELECT 1') 168 | assert r['1'] == 1 169 | 170 | def test_fairy_execute(fairy): 171 | fairy.execute('SELECT 1') 172 | 173 | def test_fairy_execute_lastrowid(fairy): 174 | row_id = fairy.execute_lastrowid('SELECT 1') 175 | assert isinstance(row_id, int) 176 | 177 | @mock.patch('memsql.common.database.Connection') 178 | def test_socket_issues(mock_class, pool, db_args, test_key): 179 | from memsql.common.connection_pool import PoolConnectionException 180 | import errno, socket 181 | instance = mock_class.return_value 182 | 183 | def raise_ioerror(*args, **kwargs): 184 | raise 
socket.error(errno.ECONNRESET, "connection reset") 185 | instance.query.side_effect = raise_ioerror 186 | 187 | fairy = pool.connect(*db_args) 188 | 189 | with pytest.raises(PoolConnectionException) as exc: 190 | fairy.query('SELECT 1') 191 | 192 | assert fairy._expired 193 | 194 | e = exc.value 195 | assert e.message == 'connection reset' 196 | assert (e.host, e.port, e.user, e.password, e.db_name, e.options, e.pid) == test_key 197 | 198 | def test_sql_errors(fairy): 199 | from MySQLdb._mysql import ProgrammingError 200 | with pytest.raises(ProgrammingError): 201 | fairy.query('asdf bad query!!') 202 | 203 | def test_exception_remapping(pool, db_args, test_db_database): 204 | from memsql.common.connection_pool import PoolConnectionException 205 | from memsql.common import errorcodes 206 | from MySQLdb import _mysql 207 | 208 | # check that some operationalerrors get mapped to PoolConnectionException 209 | bad_db_args = db_args[:-1] + ("aasjdkfjdoes_not_exist",) 210 | fairy = None 211 | with pytest.raises(PoolConnectionException): 212 | fairy = pool.connect(*bad_db_args) 213 | assert fairy is None 214 | 215 | # other programmer errors should not be mapped 216 | fairy = pool.connect(*db_args) 217 | fairy.query('CREATE DATABASE IF NOT EXISTS %s' % test_db_database) 218 | fairy.select_db(test_db_database) 219 | fairy.query('CREATE TABLE IF NOT EXISTS x (id BIGINT PRIMARY KEY)') 220 | 221 | with pytest.raises(_mysql.DatabaseError) as exc: 222 | fairy.query('SELECT bad_key FROM x') 223 | 224 | assert not fairy._expired 225 | e = exc.value 226 | assert e.args == (errorcodes.ER_BAD_FIELD_ERROR, "Unknown column 'bad_key' in 'field list'") 227 | 228 | def test_size(pool, test_key, db_args): 229 | assert pool.size() == 0 230 | 231 | fairy = pool.connect(*db_args) 232 | fairy.close() 233 | 234 | assert pool.size() == 1 235 | 236 | fairy = pool.connect(*db_args) 237 | fairy2 = pool.connect(*db_args) # noqa 238 | 239 | assert pool.size() == 2 240 | -------------------------------------------------------------------------------- /memsql/common/test/test_database_adapters.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import pytest 4 | import time 5 | from memsql.common import query_builder, database 6 | import uuid 7 | import datetime 8 | import copy 9 | 10 | def test_connection_open(test_db_conn): 11 | assert test_db_conn.connected() 12 | 13 | def test_connection_close(test_db_conn): 14 | assert test_db_conn.connected() 15 | test_db_conn.close() 16 | assert not test_db_conn.connected() 17 | 18 | def test_reconnect(test_db_conn): 19 | assert test_db_conn.connected() 20 | db_instance = test_db_conn._db 21 | test_db_conn.reconnect() 22 | assert test_db_conn.connected() 23 | assert db_instance != test_db_conn._db 24 | 25 | def test_query(test_db_conn): 26 | # select result 27 | res = test_db_conn.query('select 1') 28 | assert len(res) == 1 29 | assert res[0]['1'] == 1 30 | 31 | def test_ping(test_db_conn): 32 | test_db_conn.ping() 33 | 34 | def test_thread_id(test_db_conn): 35 | assert isinstance(test_db_conn.thread_id(), int) 36 | 37 | def test_connection_options(test_db_args): 38 | args = copy.deepcopy(test_db_args) 39 | args["host"] = "example.com" 40 | args["options"] = { "connect_timeout": 1 } 41 | with pytest.raises(database.OperationalError): 42 | conn = database.connect(**args) 43 | conn.query("SHOW TABLES") 44 | 45 | class TestQueries(object): 46 | @pytest.fixture(scope="class") 47 | def x_conn(self, request, test_db_args, 
test_db_database): 48 | return self._x_conn(request, test_db_args, test_db_database) 49 | 50 | def _x_conn(self, request, test_db_args, test_db_database): 51 | conn = database.connect(**test_db_args) 52 | conn.execute('CREATE DATABASE IF NOT EXISTS %s CHARACTER SET utf8 COLLATE utf8_general_ci' % test_db_database) 53 | conn.select_db(test_db_database) 54 | 55 | def cleanup(): 56 | conn.execute('DROP DATABASE %s' % test_db_database) 57 | request.addfinalizer(cleanup) 58 | 59 | return conn 60 | 61 | @pytest.fixture(scope="class", autouse=True) 62 | def ensure_schema(self, x_conn, request): 63 | return self._ensure_schema(x_conn, request) 64 | 65 | def _ensure_schema(self, x_conn, request): 66 | x_conn.execute('DROP TABLE IF EXISTS x') 67 | x_conn.execute(""" 68 | CREATE TABLE x ( 69 | id BIGINT AUTO_INCREMENT PRIMARY KEY, 70 | value INT, 71 | col1 VARCHAR(255), 72 | col2 VARCHAR(255), 73 | colb VARBINARY(32) 74 | ) DEFAULT CHARSET=utf8 75 | """) 76 | 77 | @pytest.fixture(autouse=True) 78 | def ensure_empty(self, x_conn, request): 79 | return self._ensure_empty(x_conn, request) 80 | 81 | def _ensure_empty(self, x_conn, request): 82 | cleanup = lambda: x_conn.execute('DELETE FROM x') 83 | cleanup() 84 | request.addfinalizer(cleanup) 85 | 86 | def test_insert(self, x_conn): 87 | res = x_conn.query('INSERT INTO x (value) VALUES(1)') 88 | assert isinstance(res, int) 89 | assert res == 1 # 1 affected row 90 | 91 | res = x_conn.execute('INSERT INTO x (value) VALUES(1)') 92 | assert isinstance(res, int) 93 | 94 | res = x_conn.execute_lastrowid('INSERT INTO x (value) VALUES(1)') 95 | assert isinstance(res, int) 96 | last_row = x_conn.get('SELECT * FROM x ORDER BY id DESC LIMIT 1') 97 | assert res == last_row.id 98 | 99 | def test_insert_qb(self, x_conn): 100 | rows = [ 101 | { 'id': 1, 'value': '2', 'col1': 1223.4, 'col2': datetime.datetime.now(), 'colb': True }, 102 | { 'id': 2, 'value': None, 'col1': None, 'col2': None, 'colb': None }, 103 | ] 104 | sql, params = query_builder.multi_insert('x', *rows) 105 | 106 | res = x_conn.query(sql, **params) 107 | assert isinstance(res, int) 108 | assert res == 2 # 2 affected row 109 | 110 | all_rows = x_conn.query('SELECT * FROM x ORDER BY id ASC') 111 | assert len(all_rows) == 2 112 | assert all_rows[0].value == 2 113 | assert all_rows[0].col1 == "1223.4" 114 | assert all_rows[1].value == None 115 | 116 | def test_select(self, x_conn): 117 | x_conn.execute('INSERT INTO x (value) VALUES (1), (2), (3)') 118 | 119 | all_rows = x_conn.query('SELECT * FROM x ORDER BY value ASC') 120 | assert len(all_rows) == 3 121 | assert all_rows[1].value == 2 122 | 123 | first_row = x_conn.get('SELECT * FROM x ORDER BY id LIMIT 1') 124 | assert first_row.value == 1 125 | 126 | def test_unicode(self, x_conn): 127 | x_conn.execute("SET NAMES utf8") 128 | 129 | x_conn.execute('INSERT INTO x (col1) VALUES (%s)', '⚑☃❄') 130 | rows = x_conn.query('SELECT * FROM x WHERE col1=%s', '⚑☃❄') 131 | assert len(rows) == 1 132 | assert rows[0].col1 == '⚑☃❄' 133 | 134 | rows = x_conn.query('SELECT * FROM x WHERE col1 in (%s)', ['⚑☃❄', 'jones']) 135 | assert len(rows) == 1 136 | assert rows[0].col1 == '⚑☃❄' 137 | 138 | rows = x_conn.query('SELECT * FROM x WHERE col1=%(col1)s', col1='⚑☃❄') 139 | assert len(rows) == 1 140 | assert rows[0].col1 == '⚑☃❄' 141 | 142 | def test_queryparams(self, x_conn): 143 | x_conn.execute('INSERT INTO x (value) VALUES (1), (2), (3)') 144 | 145 | rows = x_conn.query('SELECT * FROM x WHERE value > %s AND value < %s', 1, 3) 146 | 147 | assert len(rows) == 1 148 | 
assert rows[0].value == 2 149 | 150 | def test_advanced_params(self, x_conn): 151 | # multi-column insert with array 152 | x_conn.debug_query(''' 153 | INSERT INTO x (value, col1, col2) VALUES (%s, %s) 154 | ''', 1, ['bob', 'jones']) 155 | 156 | x_conn.debug_query(''' 157 | INSERT INTO x (value, col1, col2) VALUES (%(value)s, %(other)s) 158 | ''', value=1, other=['bob', 'jones']) 159 | 160 | rows = x_conn.query('SELECT * FROM x WHERE value = %s and col1 = %s', 1, 'bob') 161 | 162 | assert len(rows) == 2 163 | for i in range(2): 164 | assert rows[i].value == 1 165 | assert rows[i].col1 == 'bob' 166 | assert rows[i].col2 == 'jones' 167 | 168 | def test_kwargs(self, x_conn): 169 | # multi-column insert with kwargs 170 | x_conn.execute(''' 171 | INSERT INTO x (value, col1, col2) VALUES (%(value)s, %(col1)s, %(col2)s) 172 | ''', value=1, col1='bilbo', col2='jones') 173 | 174 | rows = x_conn.query('SELECT * FROM x WHERE value = %s and col1 = %s', 1, 'bilbo') 175 | 176 | assert len(rows) == 1 177 | assert rows[0].value == 1 178 | assert rows[0].col1 == 'bilbo' 179 | assert rows[0].col2 == 'jones' 180 | 181 | def test_kwargs_all(self, x_conn): 182 | # ensure they all support kwargs 183 | for method in ['debug_query', 'query', 'get', 'execute', 'execute_lastrowid']: 184 | getattr(x_conn, method)('''select * from x where col1=%(col1)s''', col1='bilbo') 185 | 186 | def test_kwparams_exclusive(self, x_conn): 187 | # ensure they are all exclusive 188 | for method in ['debug_query', 'query', 'get', 'execute', 'execute_lastrowid']: 189 | with pytest.raises(ValueError): 190 | getattr(x_conn, method)('''select * from x where col1=%(col1)s''', 1, col1='bilbo') 191 | 192 | def test_single_format(self, x_conn): 193 | x_conn.query("select * from x where col1 LIKE '%'") 194 | 195 | def test_ensure_connected(self, x_conn): 196 | old = x_conn.max_idle_time 197 | before_db = x_conn._db 198 | 199 | try: 200 | x_conn.max_idle_time = 0 201 | time.sleep(1) 202 | x_conn.query("select * from x") 203 | 204 | assert x_conn._db != before_db 205 | finally: 206 | x_conn.max_idle_time = old 207 | -------------------------------------------------------------------------------- /memsql/common/test/test_query_builder.py: -------------------------------------------------------------------------------- 1 | from memsql.common import query_builder, database 2 | 3 | def test_simple_expression(): 4 | x = { 'a': 1, 'b': '2', 'c': 1223.4 } 5 | sql, params = query_builder.simple_expression(', ', **x) 6 | assert sql == '`a`=%(_QB_a)s, `b`=%(_QB_b)s, `c`=%(_QB_c)s' 7 | assert params == { '_QB_a': 1, '_QB_b': '2', '_QB_c': 1223.4 } 8 | assert database.escape_query(sql, params) == r"`a`=1, `b`='2', `c`=1223.4e0" 9 | 10 | def test_update(): 11 | x = { 'a': 1, 'b': '2', 'c': 1223.4 } 12 | sql, params = query_builder.update('foo', **x) 13 | assert sql == 'UPDATE `foo` SET `a`=%(_QB_a)s, `b`=%(_QB_b)s, `c`=%(_QB_c)s' 14 | assert params == { '_QB_a': 1, '_QB_b': '2', '_QB_c': 1223.4 } 15 | assert database.escape_query(sql, params) == r"UPDATE `foo` SET `a`=1, `b`='2', `c`=1223.4e0" 16 | 17 | def test_multi_insert(): 18 | rows = [{ 'a': 1, 'b': '2', 'c': 1223.4 }, { 'a': 2, 'b': '5', 'c': 1 }] 19 | sql, params = query_builder.multi_insert('foo', *rows) 20 | assert sql == 'INSERT INTO `foo` (`a`, `b`, `c`) VALUES (%(_QB_ROW_0)s), (%(_QB_ROW_1)s)' 21 | assert params == { '_QB_ROW_0': [1, '2', 1223.4], '_QB_ROW_1': [2, '5', 1] } 22 | assert database.escape_query(sql, params) == r"INSERT INTO `foo` (`a`, `b`, `c`) VALUES (1,'2',1223.4e0), (2,'5',1)" 
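# A detail worth calling out from the assertion above: escape_query renders
# Python floats in exponent form (1223.4 -> 1223.4e0), which the server then
# reads as an approximate DOUBLE literal rather than an exact DECIMAL. For example:
#
#     sql, params = query_builder.update('foo', c=1223.4)
#     database.escape_query(sql, params)  # -> "UPDATE `foo` SET `c`=1223.4e0"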
23 | 24 | def test_replace(): 25 | rows = [{ 'a': 1, 'b': '2', 'c': 1223.4 }, { 'a': 2, 'b': '5', 'c': 1 }] 26 | sql, params = query_builder.multi_replace('foo', *rows) 27 | assert sql == 'REPLACE INTO `foo` (`a`, `b`, `c`) VALUES (%(_QB_ROW_0)s), (%(_QB_ROW_1)s)' 28 | assert params == { '_QB_ROW_0': [1, '2', 1223.4], '_QB_ROW_1': [2, '5', 1] } 29 | assert database.escape_query(sql, params) == r"REPLACE INTO `foo` (`a`, `b`, `c`) VALUES (1,'2',1223.4e0), (2,'5',1)" 30 | -------------------------------------------------------------------------------- /memsql/common/test/test_select_result.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from memsql.common import database 3 | import random 4 | import pytest 5 | import simplejson as json 6 | 7 | try: 8 | from collections import OrderedDict 9 | except ImportError: 10 | from ordereddict import OrderedDict 11 | 12 | FIELDS = ['l\\u203pez', 'ಠ_ಠ', 'cloud', 'moon', 'water', 'computer', 'school', 'network', 13 | 'hammer', 'walking', 'mediocre', 'literature', 'chair', 'two', 'window', 'cords', 'musical', 14 | 'zebra', 'xylophone', 'penguin', 'home', 'dog', 'final', 'ink', 'teacher', 'fun', 'website', 15 | 'banana', 'uncle', 'softly', 'mega', 'ten', 'awesome', 'attatch', 'blue', 'internet', 'bottle', 16 | 'tight', 'zone', 'tomato', 'prison', 'hydro', 'cleaning', 'telivision', 'send', 'frog', 'cup', 17 | 'book', 'zooming', 'falling', 'evily', 'gamer', 'lid', 'juice', 'moniter', 'captain', 'bonding'] 18 | 19 | def test_result_order(): 20 | raw_data = [[random.randint(1, 2 ** 32) for _ in range(len(FIELDS))] for _ in range(256)] 21 | res = database.SelectResult(FIELDS, raw_data) 22 | 23 | for i, row in enumerate(res): 24 | reference = dict(zip(FIELDS, raw_data[i])) 25 | ordered = OrderedDict(zip(FIELDS, raw_data[i])) 26 | doppel = database.Row(FIELDS, raw_data[i]) 27 | 28 | assert doppel == row 29 | assert row == reference 30 | assert row == ordered 31 | assert list(row.keys()) == FIELDS 32 | assert list(row.values()) == raw_data[i] 33 | assert sorted(row) == sorted(FIELDS) 34 | assert list(row.items()) == list(zip(FIELDS, raw_data[i])) 35 | assert list(row.values()) == raw_data[i] 36 | assert list(row.keys()) == FIELDS 37 | assert list(row.items()) == list(zip(FIELDS, raw_data[i])) 38 | 39 | for f in FIELDS: 40 | assert f in row 41 | assert f in row 42 | assert row[f] == reference[f] 43 | assert row['cloud'] == reference['cloud'] 44 | assert row[f] == ordered[f] 45 | assert row['cloud'] == ordered['cloud'] 46 | 47 | assert dict(row) == reference 48 | assert dict(row) == dict(ordered) 49 | 50 | with pytest.raises(KeyError): 51 | row['derp'] 52 | 53 | with pytest.raises(AttributeError): 54 | row.derp 55 | 56 | with pytest.raises(NotImplementedError): 57 | row.pop() 58 | 59 | with pytest.raises(NotImplementedError): 60 | reversed(row) 61 | 62 | with pytest.raises(NotImplementedError): 63 | row.update({'a': 'b'}) 64 | 65 | with pytest.raises(NotImplementedError): 66 | row.setdefault('foo', 'bar') 67 | 68 | with pytest.raises(NotImplementedError): 69 | row.fromkeys((1,)) 70 | 71 | with pytest.raises(NotImplementedError): 72 | row.clear() 73 | 74 | with pytest.raises(NotImplementedError): 75 | del row['mega'] 76 | 77 | reference['foo'] = 'bar' 78 | reference['cloud'] = 'blah' 79 | ordered['foo'] = 'bar' 80 | ordered['cloud'] = 'blah' 81 | row['foo'] = 'bar' 82 | row['cloud'] = 'blah' 83 | 84 | assert row == reference 85 | assert dict(row) == reference 86 | assert len(row) == len(reference) 
87 | assert row == ordered 88 | assert dict(row) == dict(ordered) 89 | assert len(row) == len(ordered) 90 | 91 | assert row.get('cloud') == reference.get('cloud') 92 | assert row.get('cloud') == ordered.get('cloud') 93 | assert row.get('NOPE', 1) == reference.get('NOPE', 1) 94 | assert row.get('NOPE', 1) == ordered.get('NOPE', 1) 95 | 96 | assert json.dumps(row, sort_keys=True) == json.dumps(reference, sort_keys=True) 97 | assert json.dumps(row, sort_keys=True) == json.dumps(ordered, sort_keys=True) 98 | -------------------------------------------------------------------------------- /memsql/common/test/thread_monitor.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | try: 4 | import queue 5 | except ImportError: 6 | import Queue as queue 7 | 8 | class ThreadMonitor(object): 9 | """Helper class for catching exceptions generated in threads. 10 | http://blog.eugeneoden.com/2008/05/12/testing-threads-with-pytest/ 11 | 12 | Usage: 13 | 14 | mon = ThreadMonitor() 15 | 16 | th = threading.Thread(target=mon.wrap(myFunction)) 17 | th.start() 18 | 19 | th.join() 20 | 21 | mon.check() # raises any exception generated in the thread 22 | 23 | Any raised exception will include a traceback from the original 24 | thread, not the function calling mon.check() 25 | 26 | Works for multiple threads 27 | """ 28 | def __init__(self): 29 | self.queue = queue.Queue() 30 | 31 | def wrap(self, function): 32 | def threadMonitorWrapper(*args, **kw): 33 | try: 34 | ret = function(*args, **kw) 35 | except Exception: 36 | self.queue.put(sys.exc_info()) 37 | raise 38 | 39 | return ret 40 | 41 | return threadMonitorWrapper 42 | 43 | def check(self): 44 | try: 45 | item = self.queue.get(block=False) 46 | except queue.Empty: 47 | return 48 | 49 | klass, value, tb = item 50 | exc = klass(value) 51 | 52 | if hasattr(exc, "with_traceback"): 53 | raise exc.with_traceback(tb) 54 | else: 55 | raise exc 56 | -------------------------------------------------------------------------------- /memsql/common/util.py: -------------------------------------------------------------------------------- 1 | def timedelta_total_seconds(td): 2 | """ Needed for python 2.6 compat """ 3 | return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10. ** 6) / 10. 
** 6
  4 | 
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
  1 | #!/usr/bin/env python
  2 | 
  3 | from setuptools import setup
  4 | from setuptools.command.test import test as TestCommand
  5 | 
  6 | # get version
  7 | from memsql import __version__
  8 | 
  9 | from pathlib import Path
 10 | 
 11 | this_directory = Path(__file__).parent
 12 | long_description = (this_directory / "README.md").read_text()
 13 | 
 14 | 
 15 | REQUIREMENTS = [
 16 |     'wraptor',
 17 |     'simplejson',
 18 |     'python-dateutil<3.0',
 19 |     'mysqlclient>=1.4,<3.0',
 20 | ]
 21 | 
 22 | class PyTest(TestCommand):
 23 |     user_options = [
 24 |         ('watch', 'w',
 25 |          "watch tests for changes"),
 26 |         ('pdb', 'i',
 27 |          "start pdb on failures"),
 28 |         ('scan=', 's',
 29 |          "only search for tests in the specified directory or file"),
 30 |         ('exitfirst', 'x',
 31 |          "Stop tests on first failure"),
 32 |         ('expression=', 'k',
 33 |          "Only run tests matching given expression"),
 34 |         ('verbose', 'v',
 35 |          "Print out all output as it happens"),
 36 |     ]
 37 |     boolean_options = ['watch']
 38 | 
 39 |     def initialize_options(self):
 40 |         self.watch = False
 41 |         self.verbose = False
 42 |         self.pdb = False
 43 |         self.scan = None
 44 |         self.exitfirst = False
 45 |         self.expression = None
 46 |         self.test_suite = None
 47 |         self.test_module = None
 48 |         self.test_loader = None
 49 |         self.test_runner = None
 50 | 
 51 |     def finalize_options(self):
 52 |         TestCommand.finalize_options(self)
 53 | 
 54 |         self.test_suite = True
 55 |         self.test_args = ['-v']
 56 |         if self.watch:
 57 |             self.test_args.append('-f')
 58 |         if self.verbose:
 59 |             self.test_args.append('-s')
 60 |         if self.pdb:
 61 |             self.test_args.append('--pdb')
 62 |         if self.exitfirst:
 63 |             self.test_args.append('-x')
 64 |         if self.expression:
 65 |             self.test_args.extend(['-k', self.expression])
 66 |         if self.scan is not None:
 67 |             self.test_args.append(self.scan)
 68 |         else:
 69 |             self.test_args.append('memsql')
 70 | 
 71 |     def run_tests(self):
 72 |         import os, sys, glob
 73 | 
 74 |         MY_PATH = os.path.dirname(__file__)
 75 |         sys.path.append(MY_PATH)
 76 |         os.environ['PYTHONPATH'] = os.environ.get('PYTHONPATH', '') + ':' + MY_PATH
 77 | 
 78 |         egg_dirs = glob.glob('*.egg*')
 79 |         ignore_args = ['--ignore=%s' % d for d in egg_dirs]
 80 | 
 81 |         import pytest
 82 |         errno = pytest.main(ignore_args + self.test_args)
 83 |         sys.exit(errno)
 84 | 
 85 | setup(
 86 |     name='memsql',
 87 |     version=__version__,
 88 |     author='MemSQL',
 89 |     author_email='support@memsql.com',
 90 |     url='http://github.com/memsql/memsql-python',
 91 |     license='LICENSE.txt',
 92 |     description='Useful utilities and plugins for MemSQL integration.',
 93 |     long_description=long_description,
 94 |     long_description_content_type="text/markdown",
 95 |     classifiers=[
 96 |         'License :: OSI Approved :: MIT License',
 97 |         'Programming Language :: Python :: 3.4',
 98 |         'Programming Language :: Python :: 3.5',
 99 |         'Programming Language :: Python :: 3.6',
100 |         'Programming Language :: Python :: 3.7',
101 |     ],
102 |     packages=[
103 |         'memsql',
104 |         'memsql.common',
105 |     ],
106 |     zip_safe=False,
107 |     install_requires=REQUIREMENTS,
108 |     tests_require=['pytest', 'mock', 'ordereddict==1.1'],
109 |     cmdclass={ 'test': PyTest },
110 | )
111 | 
--------------------------------------------------------------------------------
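Putting the pieces together, a minimal end-to-end sketch of the library as the tests above exercise it (host and credentials are placeholders for a reachable MemSQL/MySQL server; the database and table names are illustrative):

    from memsql.common import database, query_builder

    conn = database.connect(host='127.0.0.1', port=3306, user='root', password='')
    conn.execute('CREATE DATABASE IF NOT EXISTS demo')
    conn.select_db('demo')
    conn.execute('CREATE TABLE IF NOT EXISTS events (id BIGINT AUTO_INCREMENT PRIMARY KEY, value INT)')

    sql, params = query_builder.multi_insert('events', { 'value': 1 }, { 'value': 2 })
    conn.query(sql, **params)        # write queries return the affected-row count

    for row in conn.query('SELECT * FROM events ORDER BY value ASC'):
        print(row.value)             # rows allow attribute as well as dict-style access

    conn.close()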