├── .dockerignore ├── .github └── workflows │ └── main.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── TODO.md ├── changelog ├── cli └── mysql-cli.py ├── dev-docker-compose.yaml ├── docker-compose.yaml ├── docs ├── architecture.md ├── contributing.md ├── getting-started.md ├── migration.md └── mm-architecture.png ├── mysql_manager ├── __init__.py ├── base.py ├── cluster.py ├── cluster_data_handler.py ├── constants.py ├── dto.py ├── enums.py ├── etcd.py ├── exceptions │ ├── __init__.py │ └── exceptions.py ├── helpers │ ├── __init__.py │ ├── clone_compatibility_checker.py │ └── query_builder.py ├── instance.py ├── metrics.py ├── proxysql.py └── tests.py ├── pip.conf ├── poetry.lock ├── pyproject.toml ├── requirements.test.txt ├── requirements.txt ├── scripts ├── check-servers-up.py ├── start-replication-cli.sh ├── start-replication-with-proxysql-cli.sh ├── start-replication-with-proxysql.py ├── start-replication.py ├── start-simple-with-proxysql-cli.sh └── start-simple-with-proxysql.py └── tests ├── cli-mysql-1-proxysql.sh ├── cli-mysql-1-to-2-proxysql.sh ├── cli-mysql-2-proxysql-failover.sh ├── cli-mysql-2-proxysql.sh ├── config ├── mm-config-mysql-1.yaml ├── mm-config-mysql-2-migrate.yaml ├── mm-config-mysql-2.yaml ├── mysql-exporter-s1.cnf ├── mysql-exporter-s2.cnf ├── mysql-s1.cnf ├── mysql-s2.cnf ├── prometheus.yaml ├── proxysql.cnf └── rules.yaml ├── features ├── environment.py ├── failover.feature ├── idempotency.feature ├── migrate-remote.feature ├── mysql-add-replica.feature ├── one-mysql-and-haproxy.feature ├── remove-replica.feature ├── steps │ └── steps.py └── two-mysqls-and-haproxy.feature ├── integration_test └── environment │ ├── component_provider.py │ ├── etcd │ └── etcd_container_provider.py │ ├── haproxy │ └── haproxy_container_provider.py │ ├── mysql │ └── mysql_container_provider.py │ ├── mysql_manager │ └── mysql_manager_container_provider.py │ ├── proxysql │ └── proxysql_container_provider.py │ └── test_environment_factory.py 
├── k8s ├── mysql-1-proxysql-components.yaml ├── mysql-1-proxysql.sh ├── mysql-2-components.yaml ├── mysql-2-proxysql-components.yaml └── mysql-2-proxysql.sh └── setup-etcd.sh /.dockerignore: -------------------------------------------------------------------------------- 1 | tests/ 2 | scripts/ 3 | configs/ -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Docker Image CI 2 | 3 | on: 4 | push: 5 | branches: [ "main" ] 6 | pull_request: 7 | branches: [ "main" ] 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | 15 | - name: Docker meta 16 | id: meta 17 | uses: docker/metadata-action@v5 18 | with: 19 | tags: | 20 | type=sha,prefix=,format=short 21 | env: 22 | DOCKER_METADATA_SHORT_SHA_LENGTH: 8 23 | 24 | - name: Login to Docker Hub 25 | uses: docker/login-action@v3 26 | with: 27 | registry: ${{ vars.REGISTRY_URL }} 28 | username: ${{ secrets.REGISTRY_USER }} 29 | password: ${{ secrets.REGISTRY_PASSWORD }} 30 | 31 | - name: Set up Docker Buildx 32 | uses: docker/setup-buildx-action@v3 33 | 34 | - name: Build image 35 | run: | 36 | docker build . 
-t ${{ vars.REGISTRY_URL }}/mysql-manager:main -t ${{ vars.REGISTRY_URL }}/mysql-manager:${{ steps.meta.outputs.tags }} 37 | 38 | - name: Behave test 39 | env: 40 | MYSQL_MANAGER_IMAGE: ${{ vars.REGISTRY_URL }}/mysql-manager:${{ steps.meta.outputs.tags }} 41 | HAPROXY_IMAGE: ${{ vars.HAPROXY_IMAGE_ADDRESS }} 42 | run: | 43 | pip install -r requirements.test.txt 44 | behave tests/features 45 | 46 | - name: Push image 47 | run: | 48 | docker push ${{ vars.REGISTRY_URL }}/mysql-manager:main && 49 | docker push ${{ vars.REGISTRY_URL }}/mysql-manager:${{ steps.meta.outputs.tags }} 50 | 51 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .devcontainer.json 2 | venv 3 | config.ini 4 | .env 5 | .idea 6 | __pycache__ 7 | .env 8 | .env-test 9 | tests/integration_test/configs 10 | configs/ 11 | cluster-spec.yaml 12 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM hub.hamdocker.ir/library/python:3.11.3 2 | COPY requirements.txt . 3 | COPY pip.conf /root/.pip/ 4 | RUN pip install -r requirements.txt 5 | WORKDIR /app 6 | COPY . . 7 | ENV PYTHONUNBUFFERED 1 8 | RUN pip install . 
9 | 10 | CMD ["python", "cli/mysql-cli.py", "mysql", "run"] 11 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2024 MySQL Manager Contributors 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MySQL Manager 2 | 3 | MySQL Manager is an open source project for managing highly available MySQL replication setups. 4 | It supports MySQL asynchronous replication and ProxySQL for proxy. Go to [documentation](./docs/) 5 | for more details. 
6 | 7 | ## Features 8 | - MySQL asynchronous replication 9 | - Automatic failover in case of source (primary) failure 10 | - [Proxy](https://github.com/hamravesh/mysql-manager-haproxy) based on [HAProxy](https://www.haproxy.org/) with both write and readonly ports 11 | - High availability using [etcd](https://etcd.io/) 12 | - Supports migration from other MySQL servers using [CLONE](https://dev.mysql.com/doc/refman/8.0/en/clone-plugin.html) plugin 13 | - Prometheus metrics for observability 14 | 15 | ## Getting started 16 | To get started with MySQL Manager read [getting started doc](./docs/getting-started.md). If you want to migrate from other MySQL servers please read [migration doc](./docs/migration.md) 17 | 18 | ## Contributing and development 19 | Please follow [contributing doc](./docs/contributing.md) to set up your local development 20 | environment and contribute to this project. 21 | 22 | ## License 23 | MySQL Manager is under MIT license. See [LICENSE](LICENSE) for more details. 
24 | 25 | -------------------------------------------------------------------------------- /TODO.md: -------------------------------------------------------------------------------- 1 | - [] make MM highly available 2 | - [] use etcd as state store 3 | - [] how add new standbys 4 | - [] add replicas async 5 | - [x] yaml nested config file 6 | - [] add backend in proxysql must be idempotent 7 | - [] restart replica must not be done in an instance that has no replication configured 8 | - [] let standby replica to exec all relay log before stopping its replication 9 | - [] check proxysql state regardless of base gtid set in mysqls 10 | - [] test restart during one master down 11 | - [] test failover to old master 12 | - [] failover on disk full 13 | - [] do not failover if behind master > threshold 14 | - [] add metrics for connection failures 15 | - [] support replicating from another external server 16 | -------------------------------------------------------------------------------- /changelog: -------------------------------------------------------------------------------- 1 | - v0.1 2 | - basic feautures 3 | - v0.1.1 4 | - script for checking servers' health 5 | - v0.2.0 6 | - add cli 7 | -------------------------------------------------------------------------------- /cli/mysql-cli.py: -------------------------------------------------------------------------------- 1 | import os 2 | from configparser import ConfigParser 3 | 4 | 5 | import click 6 | import yaml 7 | import logging 8 | import signal 9 | 10 | from mysql_manager.etcd import EtcdClient 11 | from mysql_manager.cluster import ClusterManager 12 | from mysql_manager.constants import ( 13 | CLUSTER_STATE_FILE_PATH, 14 | MINIMUM_FAIL_INTERVAL 15 | ) 16 | from mysql_manager.exceptions.exceptions import FailIntervalLessThanMinimumError 17 | from mysql_manager.instance import Mysql 18 | from mysql_manager.cluster_data_handler import ClusterDataHandler 19 | from mysql_manager.proxysql import ProxySQL 20 | from 
mysql_manager.exceptions import MysqlNodeAlreadyExists, MysqlNodeDoesNotExist, ProgramKilled, SourceDatabaseCannotBeDeleted 21 | from mysql_manager.enums import * 22 | 23 | current_dir = os.getcwd() 24 | BASE_DIR = os.path.abspath(os.path.join(current_dir, os.pardir)) 25 | etcd_client = EtcdClient() 26 | cluster_data_handler = ClusterDataHandler() 27 | logger = logging.getLogger(__name__) 28 | 29 | 30 | @click.group() 31 | @click.pass_context 32 | def cli(ctx): 33 | pass 34 | # ctx.ensure_object(dict) 35 | # os.makedirs('/etc/mm', exist_ok=True) 36 | # ctx.obj['CONFIG'] = read_config_file(DEFAULT_CONFIG_PATH) if DEFAULT_CONFIG_PATH else None 37 | 38 | 39 | @cli.group() 40 | @click.pass_context 41 | def mysql(ctx): 42 | pass 43 | 44 | 45 | @cli.command() 46 | def promote(): 47 | if cluster_data_handler.get_cluster_state() != MysqlClusterState.STANDBY.value: 48 | logger.error("You can not promote a cluster that is not standby") 49 | return 50 | 51 | cluster_data_handler.update_cluster_state(MysqlClusterState.NEW.value) 52 | 53 | 54 | @cli.command() 55 | @click.option('-f', '--file', help='MySQL cluster spec file', required=False) 56 | @click.option('-s', '--spec', help='MySQL cluster spec', required=False) 57 | @click.option('--standby', is_flag=True, help='Set this flag if you want to replicate from remote server') 58 | def init(file, spec, standby: bool): 59 | ## TODO: handle inline spec 60 | ## TODO: validate if remote exists in config 61 | with open(file, "r") as sf: 62 | cluster_data = yaml.safe_load(sf.read()) 63 | ## TODO: validate data 64 | names = list(cluster_data["mysqls"].keys()) 65 | cluster_data["mysqls"][names[0]]["role"] = MysqlRoles.SOURCE.value 66 | if len(names) == 2: 67 | cluster_data["mysqls"][names[1]]["role"] = MysqlRoles.REPLICA.value 68 | 69 | if standby: 70 | cluster_data["remote"]["role"] = MysqlRoles.SOURCE.value 71 | cluster_data["status"] = { 72 | "state": MysqlClusterState.STANDBY.value 73 | } 74 | else: 75 | cluster_data["status"] = { 
76 | "state": MysqlClusterState.NEW.value 77 | } 78 | cluster_data_handler.write_cluster_data_dict(cluster_data) 79 | 80 | @cli.command() 81 | @click.argument("fail_interval", type=int) 82 | def set_fail_interval(fail_interval): 83 | try: 84 | cluster_data_handler.set_fail_interval(fail_interval) 85 | except FailIntervalLessThanMinimumError: 86 | print(f"The value of fail_interval could not be less than {MINIMUM_FAIL_INTERVAL}") 87 | 88 | @cli.command() 89 | @click.option('-h', '--host', help='MySQL host', required=True) 90 | @click.option('-u', '--user', help='Username for MySQL', default='root') 91 | @click.option('-p', '--password', help='Password for MySQL', required=True) 92 | @click.option('-n', '--name', help='Name for MySQL', required=True) 93 | @click.option('--port', help='Port for MySQL', type=int, default=3306) 94 | def add(host, user, password, name, port): 95 | try: 96 | cluster_data_handler.add_mysql( 97 | name=name, 98 | mysql_data={ 99 | "host": host, 100 | "user": user, 101 | "password": password, 102 | "role": MysqlRoles.REPLICA.value, 103 | } 104 | ) 105 | except MysqlNodeAlreadyExists: 106 | print(f"mysql server with name: {name} can not be added because it already exists.") 107 | 108 | @cli.command() 109 | @click.option('-n', '--name', help='Name for MySQL', required=True) 110 | def remove(name): 111 | try: 112 | cluster_data_handler.remove_mysql( 113 | name=name, 114 | ) 115 | except MysqlNodeDoesNotExist: 116 | print(f"{name} mysql is not in database cluster.") 117 | except SourceDatabaseCannotBeDeleted: 118 | print(f"{name} mysql can not be removed because it is source database.") 119 | 120 | @mysql.command() 121 | def get_cluster_status(): 122 | # state = etcd_client.read_status() 123 | with open(CLUSTER_STATE_FILE_PATH, "r") as sf: 124 | state = yaml.safe_load(sf) 125 | 126 | print("source="+state.get("source")) 127 | print("replica="+state.get("replica")) 128 | 129 | 130 | def signal_handler(signum, frame): 131 | raise ProgramKilled() 
132 | 133 | 134 | @mysql.command() 135 | def run(): 136 | signal.signal(signal.SIGTERM, signal_handler) 137 | signal.signal(signal.SIGINT, signal_handler) 138 | # create_config_file_from_env(nodes_count=nodes) 139 | print("Starting cluster manager...") 140 | try: 141 | clm = ClusterManager() 142 | clm.run() 143 | except ProgramKilled: 144 | print("Received termination signal. Exiting...") 145 | 146 | 147 | if __name__ == '__main__': 148 | cli() 149 | -------------------------------------------------------------------------------- /dev-docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | mysql-s1: 5 | image: hub.hamdocker.ir/library/mysql:8.0.35-bullseye 6 | network_mode: host 7 | environment: 8 | MYSQL_ROOT_PASSWORD: 'root' 9 | volumes: 10 | - './tests/config/mysql-s1.cnf:/etc/mysql/conf.d/mysql.cnf' 11 | restart: always 12 | ports: 13 | - 3306:3306 14 | mysql-s2: 15 | image: hub.hamdocker.ir/library/mysql:8.0.35-bullseye 16 | environment: 17 | MYSQL_ROOT_PASSWORD: 'root' 18 | volumes: 19 | - './tests/config/mysql-s2.cnf:/etc/mysql/conf.d/mysql.cnf' 20 | restart: always 21 | ports: 22 | - 3307:3306 23 | proxysql: 24 | image: hub.hamdocker.ir/proxysql/proxysql:2.6.2 25 | volumes: 26 | - './tests/config/proxysql.cnf:/etc/proxysql.cnf' 27 | ports: 28 | - 6032:6032 29 | -------------------------------------------------------------------------------- /docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | 3 | services: 4 | mm: 5 | # image: registry.hamdocker.ir/public/mysql-manager:b212be8e 6 | build: . 
7 | environment: 8 | ETCD_HOST: etcd 9 | ETCD_USERNAME: mm 10 | ETCD_PASSWORD: password 11 | ETCD_PREFIX: mm/cluster1/ 12 | volumes: 13 | - './tests/config/mm-config-mysql-2.yaml:/etc/mm/cluster-spec.yaml' 14 | etcd: 15 | image: quay.hamdocker.ir/coreos/etcd:v3.5.9-amd64 16 | command: 17 | - etcd 18 | - --data-dir=/var/lib/etcd 19 | - --name=mm-etcd 20 | - --advertise-client-urls=http://etcd:2379 21 | - --initial-cluster-token=etcd-cluster 22 | - --initial-cluster-state=new 23 | - --listen-client-urls=http://0.0.0.0:2379 24 | - --listen-metrics-urls=http://0.0.0.0:2381 25 | - --listen-peer-urls=http://0.0.0.0:2380 26 | - --auto-compaction-mode=revision 27 | - --auto-compaction-retention=5 28 | # volumes: 29 | # - ./etcd-data/:/var/lib/etcd/ 30 | mysql-s1: 31 | image: hub.hamdocker.ir/library/mysql:8.0.35-bullseye 32 | environment: 33 | MYSQL_ROOT_PASSWORD: 'root' 34 | volumes: 35 | - './tests/config/mysql-s1.cnf:/etc/mysql/conf.d/mysql.cnf' 36 | # - './mysql-s1-data:/var/lib/mysql' 37 | restart: always 38 | mysql-s2: 39 | image: hub.hamdocker.ir/library/mysql:8.0.35-bullseye 40 | environment: 41 | MYSQL_ROOT_PASSWORD: 'root' 42 | volumes: 43 | - './tests/config/mysql-s2.cnf:/etc/mysql/conf.d/mysql.cnf' 44 | # - './mysql-s2-data:/var/lib/mysql' 45 | restart: always 46 | mysql-exporter-s1: 47 | image: hub.hamdocker.ir/prom/mysqld-exporter:v0.15.1 48 | command: "--config.my-cnf=/etc/my.cnf" 49 | volumes: 50 | - './tests/config/mysql-exporter-s1.cnf:/etc/my.cnf' 51 | mysql-exporter-s2: 52 | image: hub.hamdocker.ir/prom/mysqld-exporter:v0.15.1 53 | command: "--config.my-cnf=/etc/my.cnf" 54 | volumes: 55 | - './tests/config/mysql-exporter-s2.cnf:/etc/my.cnf' 56 | proxysql: 57 | image: hub.hamdocker.ir/proxysql/proxysql:2.6.2 58 | volumes: 59 | - './tests/config/proxysql.cnf:/etc/proxysql.cnf' 60 | # - './proxysql-data:/var/lib/proxysql' 61 | promtheus: 62 | image: hub.hamdocker.ir/prom/prometheus 63 | volumes: 64 | - 
'./tests/config/prometheus.yaml:/etc/prometheus/prometheus.yml' 65 | - './tests/config/rules.yaml:/etc/prometheus/rules.yaml' 66 | ports: 67 | - "9090:9090" 68 | -------------------------------------------------------------------------------- /docs/architecture.md: -------------------------------------------------------------------------------- 1 | This is the architecture that we propose for MySQL clustering: 2 | 3 | ![MySQL Manager architecture](./mm-architecture.png) 4 | 5 | PV1 and PV2 refer to disks that each MySQL server has. 6 | -------------------------------------------------------------------------------- /docs/contributing.md: -------------------------------------------------------------------------------- 1 | First off, thanks for your support. Feel free to open issues if you have seen 2 | any bugs or you need more features. If you want to contribute bug fixes 3 | or features to this project please read below. 4 | 5 | ## installation 6 | You should create your own python virtual environment and install packages in it: 7 | ```sh 8 | pip3 install -r requirements.txt 9 | ``` 10 | Then you can start working on the project. To run project please follow instructions in 11 | [getting started doc](./getting-started.md). To run test go to [test with behave](./contributing.md#test-with-behave) 12 | 13 | ## build 14 | Run this to build docker image: 15 | ```sh 16 | VERSION= 17 | docker build -t registry.hamdocker.ir/public/mysql-manager:$VERSION -t registry.hamdocker.ir/public/mysql-manager:latest . 
18 | docker push registry.hamdocker.ir/public/mysql-manager:$VERSION 19 | docker push registry.hamdocker.ir/public/mysql-manager:latest 20 | ``` 21 | 22 | ## generate requirements.txt 23 | This is needed when you add new python package using `poetry add` 24 | ```sh 25 | pip install poetry 26 | poetry add 27 | ## or 28 | poetry update 29 | poetry export --without-hashes --format=requirements.txt > requirements.txt 30 | ``` 31 | 32 | ## tests 33 | To run tests: 34 | ```sh 35 | docker compose down 36 | docker compose up 37 | ./tests/mysql_replication.sh 38 | ./tests/mysql_replication_with_proxysql.sh 39 | ``` 40 | 41 | ## test with behave 42 | First install behave: 43 | ```sh 44 | pip install behave testcontainers 45 | behave tests/features 46 | ## if you want to build image 47 | BUILD_IMAGE=true behave tests/features 48 | ## if you want to test specific feature 49 | behave tests/features/.feature 50 | ## to test a specific scenario at line LINENO 51 | behave tests/features/.feature:LINENO 52 | ``` 53 | 54 | ## design and scenarios 55 | when the cluster manager is created it creates src and repl based on s1 and s2, respectively. 
56 | scenarios: 57 | - mm is up during failover and startup of cluster, one proxysql, one master and one repl 58 | - on startup with 2 nodes: 59 | - wait for all to become up 60 | - check if proxysql is configured: 61 | - if not, initialize proxysql setup 62 | - find src and repl 63 | - if both have gtid base (only 5 transactions from their own server_uuid) and proxysql is not configured: s1 is master and s2 is repl, config users and stuff in src, add_replica_to_master, add backends to proxysql 64 | - if repl.is_replica and repl.master is src do nothing 65 | - if src.is_replica and src.master is repl: switch src and repl 66 | - on startup with 1 node: 67 | - wait for all nodes to become up 68 | - check if proxysql is configured: 69 | - if not, initialize proxysql setup 70 | - if src has base gtid set and proxysql is not configured: config users and stuff in src, add src to proxysql 71 | - reconcile: 72 | - if `old_master_joined` is false 73 | - try to connect to repl: 74 | - make it a replica of src 75 | - clone src data 76 | - enable readonly 77 | - add it to proxysql 78 | - ping repl and src 79 | - if src is not up and repl is up: 80 | - update cluster state 81 | - increase `master_failure_count` by 1 82 | - if `master_failure_count` > `MASTER_FAILURE_THRESHOLD` 83 | - set the variable `old_master_joined` to false 84 | - shun src in proxysql 85 | - stop replication, reset replica and disable read_only in repl (repl will be new master) 86 | - switch repl and src 87 | - try to connect to old master: 88 | - make it a replica of new master 89 | - clone its data 90 | - enable readonly 91 | - if both are down 92 | - update cluster state 93 | - increase `master_failure_count` by 1 94 | - increase metric `cluster_connection_failure` (we will get alert) 95 | - if replica is down 96 | - update cluster state 97 | - if replica is up but has replication problem: 98 | - restart its replication 99 | - wait for 5 seconds 100 | - fails: 101 | - new master fails when old master is 
not ready yet 102 | - mm is restarted before old master is joined to new master 103 | - master disk is full or read-only 104 | - replica has problem syncing with master 105 | - mm starts when replica has problem connecting to master 106 | - mm is restarted when adding initial user and stuff in src 107 | - crashes when src or repl are not available 108 | 109 | 110 | metrics: 111 | - `total_cluster_connection_failure` 112 | - `total_successful_failover` 113 | - current server roles 114 | 115 | failover test scenarios: 116 | - 2 mysql servers up, 1 proxysql up: 117 | - master fails. tests: 118 | - deleted in proxysql 119 | - old replica must be master: 120 | - read_only = 0 121 | - super_read_only = 0 122 | - no replication config 123 | - added as a writeable host in proxysql 124 | - deleted old master 125 | - old master must join the cluster 126 | - read_only = 1 127 | - super_readonly = 0 128 | - replicating from new master 129 | - gtid executed set must match that of new master's 130 | - new master fails 131 | - after failover mm restarts 132 | - after initial setup mm restarts 133 | - master fails and becomes running before failure threshold 134 | 135 | 136 | -------------------------------------------------------------------------------- /docs/getting-started.md: -------------------------------------------------------------------------------- 1 | ## deploy with docker 2 | You can use [docker-compose.yaml](./../docker-compose.yaml) to deploy all needed 3 | services. Make sure to uncomment volumes if you need persistence. 4 | ```sh 5 | docker compose up etcd -d 6 | ``` 7 | It deploys etcd to store state of the cluster. 8 | After running the etcd, we need to do some inital setup in it. 9 | First create root user and another user with limited privileges for MySQL Manager. 
10 | ```sh 11 | docker compose exec etcd etcdctl user add root --new-user-password="password" 12 | docker compose exec etcd etcdctl user grant-role root root 13 | docker compose exec etcd etcdctl user add mm --new-user-password="password" 14 | docker compose exec etcd etcdctl role add mm 15 | docker compose exec etcd etcdctl role grant-permission mm --prefix=true readwrite mm/cluster1/ 16 | docker compose exec etcd etcdctl user grant-role mm mm 17 | docker compose exec etcd etcdctl auth enable 18 | ``` 19 | We created `mm` user with password `password`. In a production environment you need to set a 20 | much stronger password. User `mm` is granted access to `mm/cluster1/` prefix in etcd. Note that 21 | the environment variables in `mm` container must match the values given above. 22 | 23 | Then run all other containers: 24 | ```sh 25 | docker compose up -d --build 26 | ``` 27 | It deploys two MySQL servers, their exporters and a ProxySQL. MySQL Manager is set up by building 28 | an image from Dockerfile at the root of the project. Now start cluster with this command: 29 | ```sh 30 | docker compose exec mm python cli/mysql-cli.py init -f /etc/mm/cluster-spec.yaml 31 | ``` 32 | Running `python cli/mysql-cli.py init -f /etc/mm/cluster-spec.yaml` in `mm` makes MySQL Manager 33 | to start cluster setup, watch for their state and failover if needed. 
After a minute check `mm` 34 | logs: 35 | ```sh 36 | docker compose logs mm -f 37 | ``` 38 | You should see these lines in its logs: 39 | ```log 40 | Source is mysql-s1 41 | Replica is mysql-s2 42 | ``` 43 | You can check server states by running this: 44 | ```sh 45 | docker compose exec mm python cli/mysql-cli.py mysql get-cluster-status 46 | ``` 47 | The output is like this: 48 | ```log 49 | source=up 50 | replica=up 51 | ``` 52 | 53 | 54 | ## destroy 55 | to destroy the containers run: 56 | ```sh 57 | docker compose down 58 | ``` 59 | -------------------------------------------------------------------------------- /docs/migration.md: -------------------------------------------------------------------------------- 1 | ## Requirements 2 | You can migrate from other MySQL servers (we call them remote servers) to MySQL Manager clusters. 3 | MySQL Manager uses clone plugin to migrate data from remote server. 4 | It checks for compatibility in the remote server. Version check is not 5 | implemented because some distributions add strings to the version and make it 6 | hard to compare versions. Please refer to 7 | [this doc](https://dev.mysql.com/doc/refman/8.0/en/clone-plugin-remote.html) for 8 | version requirements when using clone plugin. 9 | Other requirements are checked by MySQL Manager itself. 10 | 11 | ## Deployment 12 | You should follow instructions in [Getting Started](./getting-started.md) doc to 13 | setup a cluster. 
Replace `mm-config-mysql-2.yaml` in [docker-compose.yaml](../docker-compose.yaml) with `mm-config-mysql-2-migrate.yaml` and when starting cluster 14 | in `mm` container add `--standby` flag: 15 | ```sh 16 | docker compose exec mm python cli/mysql-cli.py init -f /etc/mm/cluster-spec.yaml --standby 17 | ``` 18 | -------------------------------------------------------------------------------- /docs/mm-architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hamravesh/mysql-manager/02300bce2f1694a094d600960cec0fc3d3947775/docs/mm-architecture.png -------------------------------------------------------------------------------- /mysql_manager/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hamravesh/mysql-manager/02300bce2f1694a094d600960cec0fc3d3947775/mysql_manager/__init__.py -------------------------------------------------------------------------------- /mysql_manager/base.py: -------------------------------------------------------------------------------- 1 | import pymysql 2 | import datetime 3 | from mysql_manager.exceptions import MysqlConnectionException 4 | 5 | class BaseServer: 6 | def __init__(self, host: str, user: str, password: str, port: int=3306) -> None: 7 | self.host = host 8 | self.port = port 9 | self.user = user 10 | self.password = password 11 | 12 | def _log(self, msg) -> None: 13 | print(str(datetime.datetime.now()) + " " + "[" + self.host + "] " + msg) 14 | 15 | # TODO: use single connection 16 | def _get_db(self): 17 | db = None 18 | try: 19 | db = pymysql.Connection( 20 | host=self.host, 21 | port=self.port, 22 | user=self.user, 23 | password=self.password, 24 | cursorclass=pymysql.cursors.DictCursor, 25 | ) 26 | except Exception as e: 27 | self._log(str(e)) 28 | return None 29 | return db 30 | 31 | def fetch(self, query: str, args: list) -> dict: 32 | db = self._get_db() 33 | if db is 
None: 34 | self._log("Could not connect to mysql") 35 | raise MysqlConnectionException() 36 | 37 | result = None 38 | with db: 39 | with db.cursor() as cursor: 40 | try: 41 | cursor.execute(query, args=args) 42 | result = cursor.fetchall() 43 | except Exception as e: 44 | self._log(str(e)) 45 | raise e 46 | return result 47 | 48 | def run_command(self, command: str) -> dict: 49 | db = self._get_db() 50 | if db is None: 51 | self._log("Could not connect to mysql") 52 | raise MysqlConnectionException() 53 | 54 | result = None 55 | with db: 56 | with db.cursor() as cursor: 57 | try: 58 | cursor.execute(command) 59 | result = cursor.fetchone() 60 | except Exception as e: 61 | self._log(str(e)) 62 | raise e 63 | 64 | return result 65 | 66 | def ping(self) -> bool: 67 | db = self._get_db() 68 | if db is None: 69 | self._log("Could not connect to server") 70 | raise MysqlConnectionException() 71 | 72 | with db: 73 | try: 74 | db.ping(reconnect=True) 75 | except Exception as e: 76 | self._log(str(e)) 77 | raise e 78 | return True 79 | 80 | -------------------------------------------------------------------------------- /mysql_manager/cluster.py: -------------------------------------------------------------------------------- 1 | import time, datetime, yaml, math 2 | from pymysql.err import OperationalError 3 | from dataclasses import asdict 4 | from prometheus_client import start_http_server 5 | 6 | from mysql_manager.helpers.clone_compatibility_checker import CloneCompatibilityChecker 7 | from mysql_manager.instance import Mysql 8 | from mysql_manager.etcd import EtcdClient 9 | from mysql_manager.dto import ClusterData 10 | from mysql_manager.base import BaseServer 11 | # from mysql_manager.proxysql import ProxySQL 12 | from mysql_manager.enums import ( 13 | MysqlReplicationProblem, 14 | MysqlStatus, 15 | MysqlClusterState, 16 | MysqlRoles, 17 | ) 18 | from mysql_manager.cluster_data_handler import ClusterDataHandler 19 | from mysql_manager.exceptions import ( 20 | 
MysqlConnectionException, 21 | MysqlClusterConfigError, 22 | ) 23 | from mysql_manager.constants import * 24 | from mysql_manager.metrics import ( 25 | FAILOVER_ATTEMPTS, 26 | REPLICATION_RESTARTS, 27 | CLUSTER_FAILURES, 28 | MASTER_UP_STATUS, 29 | REPLICA_UP_STATUS, 30 | ) 31 | 32 | class ClusterManager: 33 | def __init__(self, config_file: str=DEFAULT_CONFIG_PATH): 34 | self.src: Mysql | None = None 35 | self.repl: Mysql | None = None 36 | # self.proxysqls: list[ProxySQL] = [] 37 | self.users: dict = {} 38 | self.remote: Mysql = None 39 | self.config_file = config_file 40 | self.cluster_data_handler = ClusterDataHandler() 41 | self.etcd_client = EtcdClient() 42 | self.fail_interval = None 43 | 44 | # Start Prometheus metrics server on port 8000 45 | start_http_server(8000) 46 | 47 | @property 48 | def master_failure_threshold(self) -> int: 49 | return math.ceil(self.fail_interval / CLUSTER_CHECK_INTERVAL_SECONDS) 50 | 51 | def _log(self, msg) -> None: 52 | print(str(datetime.datetime.now()) + " " + msg) 53 | 54 | def _validate_cluster_spec(self, spec: dict): 55 | if len(spec["mysqls"]) == 0: 56 | raise MysqlClusterConfigError() 57 | 58 | def _load_cluster_data(self): 59 | ## TODO: handle mysql servers with ports other than 3306 60 | self.users = self.cluster_data_handler.get_users() 61 | self.fail_interval = self.cluster_data_handler.get_fail_interval() 62 | does_repl_exist = False 63 | for name, mysql in self.cluster_data_handler.get_mysqls().items(): 64 | if mysql.role == MysqlRoles.SOURCE.value: 65 | if self.src is None or self.src.name != name: 66 | self.src = Mysql(name=name, **asdict(mysql)) 67 | elif mysql.role == MysqlRoles.REPLICA.value: 68 | does_repl_exist = True 69 | if self.repl is None or self.repl.name != name: 70 | self.repl = Mysql(name=name, **asdict(mysql)) 71 | if not does_repl_exist: 72 | self.repl = None 73 | 74 | remote_dto = self.cluster_data_handler.get_remote() 75 | if remote_dto is not None: 76 | self.remote = 
Mysql(name=REMOTE_SOURCE_NAME, **asdict(remote_dto)) 77 | 78 | def run(self): 79 | while not self.cluster_data_handler.is_cluster_data_available(): 80 | time.sleep(CLUSTER_CHECK_INTERVAL_SECONDS) 81 | self._log("Cluster data not available. Waiting for it...") 82 | 83 | self._load_cluster_data() 84 | 85 | ## here we assume the remote server is always up because we don't have control over it 86 | while self.cluster_data_handler.get_cluster_state() == MysqlClusterState.STANDBY.value: 87 | self._log(f"Cluster is in standby mode. Remote server: {self.remote.host}") 88 | if self.must_replica_join_source(self.src, self.remote): 89 | self.join_source_to_remote(retry=1000) 90 | time.sleep(CLUSTER_CHECK_INTERVAL_SECONDS) 91 | 92 | if self.remote is not None: 93 | self.src.reset_replication() 94 | 95 | self.start() 96 | while True: 97 | self._log("Checking cluster state...") 98 | time.sleep(CLUSTER_CHECK_INTERVAL_SECONDS) 99 | self._load_cluster_data() 100 | self.reconcile_cluster() 101 | 102 | def must_replica_join_source(self, repl: Mysql|None, src: Mysql) -> bool: 103 | # in the first two checks, if replica is not available we return True to 104 | # prevent useless start replication attempts 105 | if repl is None: 106 | return False 107 | if not self.is_server_up(repl): 108 | return False 109 | 110 | repl_status = repl.get_replica_status() 111 | if repl_status is not None and repl_status.get("Source_Host") == src.host: 112 | return False 113 | 114 | return True 115 | 116 | def reconcile_cluster(self): 117 | self._log("Running reconciliation for cluster") 118 | 119 | self.update_cluster_state() 120 | # self._log(str(self.cluster_status)) 121 | self._set_status_metrics() 122 | 123 | if self.repl is not None: 124 | if self.must_replica_join_source(self.repl, self.src): 125 | self.join_replica_to_source(retry=10) 126 | if ( 127 | self.repl.status == MysqlStatus.REPLICATION_THREADS_STOPPED.value 128 | and self.src.status == MysqlStatus.UP.value 129 | ): 130 | 
self.repl.restart_replication() 131 | REPLICATION_RESTARTS.inc() 132 | elif ( 133 | self.src.status == MysqlStatus.DOWN.value 134 | and self.repl.status == MysqlStatus.DOWN.value 135 | ): 136 | CLUSTER_FAILURES.inc() 137 | self._log("Cluster failure detected: Master and replicas are down.") 138 | elif ( 139 | # TODO: add more checks for replica: if it was not running sql thread for 140 | # a long time, if it is behind master for a long time 141 | self.src.health_check_failures > self.master_failure_threshold 142 | and self.repl.status != MysqlStatus.DOWN.value 143 | ): 144 | self._log("Running failover for cluster") 145 | FAILOVER_ATTEMPTS.inc() 146 | ## TODO: what if we restart when running this 147 | ## TODO: use etcd txn 148 | self.cluster_data_handler.set_mysql_role(self.src.name, MysqlRoles.REPLICA.value) 149 | self.cluster_data_handler.set_mysql_role(self.repl.name, MysqlRoles.SOURCE.value) 150 | ## TODO: let all relay logs to be applied before resetting replication 151 | self.repl.reset_replication() 152 | self.switch_src_and_repl() 153 | 154 | self._log(f"Source is {self.src.host}") 155 | if self.repl is not None: 156 | self._log(f"Replica is {self.repl.host}") 157 | 158 | def _set_status_metrics(self): 159 | MASTER_UP_STATUS.clear() 160 | MASTER_UP_STATUS.labels(host=self.src.host).set( 161 | 1 if self.src.status == MysqlStatus.UP.value else 0 162 | ) 163 | 164 | if self.repl is not None: 165 | REPLICA_UP_STATUS.clear() 166 | REPLICA_UP_STATUS.labels(host=self.repl.host).set( 167 | 1 if self.repl.status == MysqlStatus.UP.value else 0 168 | ) 169 | 170 | def switch_src_and_repl(self): 171 | self._log(f"Switching src[{self.src.host}] and repl[{self.repl.host}]") 172 | tmp_src = Mysql( 173 | self.src.host, 174 | self.src.user, 175 | self.src.password, 176 | self.src.name, 177 | self.src.role, 178 | ) 179 | self.src = Mysql( 180 | self.repl.host, 181 | self.repl.user, 182 | self.repl.password, 183 | self.repl.name, 184 | MysqlRoles.SOURCE.value, 185 | ) 186 | 
self.repl = Mysql( 187 | tmp_src.host, 188 | tmp_src.user, 189 | tmp_src.password, 190 | tmp_src.name, 191 | MysqlRoles.REPLICA.value, 192 | ) 193 | 194 | def update_cluster_state(self) -> dict: 195 | if self.is_server_up(self.src, retry=1): 196 | self.src.status = MysqlStatus.UP.value 197 | self.src.health_check_failures = 0 198 | else: 199 | self.src.status = MysqlStatus.DOWN.value 200 | self.src.health_check_failures += 1 201 | 202 | if self.repl is not None: 203 | if self.is_server_up(self.repl, retry=1): 204 | self.repl.status = MysqlStatus.UP.value 205 | problems = self.repl.find_replication_problems() 206 | if MysqlReplicationProblem.NOT_REPLICA.value in problems: 207 | self.repl.status = MysqlStatus.NOT_REPLICA.value 208 | return 209 | if ( 210 | MysqlReplicationProblem.SQL_THREAD_NOT_RUNNING.value in problems 211 | or MysqlReplicationProblem.IO_THREAD_NOT_RUNNING.value in problems 212 | ): 213 | self.repl.status = MysqlStatus.REPLICATION_THREADS_STOPPED.value 214 | else: 215 | self.repl.status = MysqlStatus.DOWN.value 216 | 217 | self._write_cluster_state() 218 | 219 | def _write_cluster_state(self): 220 | ## TODO: maybe change this 221 | with open(CLUSTER_STATE_FILE_PATH, "w") as sf: 222 | sf.write(yaml.safe_dump( 223 | { 224 | "source": self.src.status, 225 | "replica": self.repl.status if self.repl is not None else MysqlStatus.DOWN.value, 226 | } 227 | )) 228 | 229 | def start_mysql_replication_from_remote(self): 230 | ## TODO: reset replication all for both of them 231 | self._log(f"Starting replication in {self.src.host} from remote {self.remote.host}") 232 | self.src.set_remote_source(self.remote) 233 | ## DOC: remote.user and remote.password must have replication and clone access in remote 234 | self.src.start_replication(self.remote.user, self.remote.password) 235 | 236 | def start_mysql_replication(self): 237 | ## TODO: reset replication all for both of them 238 | self._log(f"Starting replication in {self.repl.host}") 239 | 
self.src.add_replica(self.repl) 240 | self.repl.set_source(self.src) 241 | self.repl.start_replication("replica", self.users["replPassword"]) 242 | 243 | def is_server_up(self, server: BaseServer, retry: int=1) -> bool: 244 | for i in range(retry): 245 | try: 246 | server.ping() 247 | return True 248 | except Exception: 249 | # do not sleep in last retry 250 | if i+1 != retry: 251 | time.sleep(RETRY_WAIT_SECONDS) 252 | 253 | return False 254 | 255 | def check_servers_up(self, retry: int=1): 256 | # is_ok = False 257 | for _ in range(retry): 258 | try: 259 | self.src.ping() 260 | if self.repl is not None: 261 | self.repl.ping() 262 | # self.proxysqls[0].ping() 263 | return 264 | except Exception as e: 265 | time.sleep(RETRY_WAIT_SECONDS) 266 | # is_ok = True 267 | 268 | # if is_ok == False: 269 | # raise MysqlConnectionException() 270 | 271 | def config_src_initial_setup(self): 272 | self._log(f"Initial config of src[{self.src.host}]") 273 | self.src.add_pitr_event(minute_intervals=15) 274 | self.src.create_new_user( 275 | "replica", self.users["replPassword"], ["REPLICATION SLAVE"] 276 | ) 277 | self.src.create_database(DEFAULT_DATABASE) 278 | self.src.create_monitoring_user(self.users["exporterPassword"]) 279 | self.src.create_nonpriv_user(self.users["nonprivUser"], self.users["nonprivPassword"]) 280 | 281 | def join_source_to_remote(self, retry: int=1): 282 | ## TODO: check remote server id 283 | self._log("Joining source to remote") 284 | 285 | # TODO: if self.src is in replicating_remote state do not clone remote 286 | self.src.status = MysqlStatus.CLONING_REMOTE.value 287 | if self.repl is not None: 288 | self.repl.status = MysqlStatus.UP.value 289 | self._write_cluster_state() 290 | 291 | self.src.install_plugin("clone", "mysql_clone.so") 292 | self.src.run_command( 293 | f"set persist clone_valid_donor_list='{self.remote.host}:{self.remote.port}'" 294 | ) 295 | self.src.run_command("set persist read_only=0") 296 | 297 | ## we do not proceed until clone is 
successful 298 | while True: 299 | if not CloneCompatibilityChecker(src=self.src, remote=self.remote).is_clone_possible(): 300 | self._log(f"Cloning is not possible, waiting for {CLONE_COMPATIBILITY_CHECK_INTERVAL_SECONDS} seconds") 301 | time.sleep(CLONE_COMPATIBILITY_CHECK_INTERVAL_SECONDS) 302 | continue 303 | try: 304 | self._log("Cloning remote server") 305 | self.src.run_command( 306 | f"CLONE INSTANCE FROM '{self.remote.user}'@'{self.remote.host}':{self.remote.port} IDENTIFIED BY '{self.remote.password}'" 307 | ) 308 | except OperationalError as o: 309 | self._log(str(o)) 310 | if "Restart server failed (mysqld is not managed by supervisor process)" in str(o): 311 | break 312 | self._log("Failed to clone remote. Trying again...") 313 | time.sleep(CLUSTER_CHECK_INTERVAL_SECONDS) 314 | 315 | self._log("Waiting for source to become ready") 316 | src_main_password = self.src.password 317 | src_main_user = self.src.user 318 | self.src.password = self.remote.password 319 | self.src.user = self.remote.user 320 | ## TODO: when the database is so big, it takes more than 10 retries to get ready. 
maybe get retry count as a config 321 | if not self.is_server_up(self.src, retry=retry): 322 | return 323 | 324 | self.src.status = MysqlStatus.REPLICATING_REMOTE.value 325 | self._write_cluster_state() 326 | 327 | if self.src.user_exists(src_main_user): 328 | self.src.change_user_password(src_main_user, src_main_password) 329 | else: 330 | self.src.create_new_user(src_main_user, src_main_password, ["ALL"]) 331 | 332 | self.src.password = src_main_password 333 | self.src.user = src_main_user 334 | self.start_mysql_replication_from_remote() 335 | 336 | def join_replica_to_source(self, retry: int=1): 337 | self._log("Joining replica to source") 338 | # TODO: do not clone if the gtids are in sync 339 | self.src.install_plugin("clone", "mysql_clone.so") 340 | self.repl.install_plugin("clone", "mysql_clone.so") 341 | 342 | self.repl.run_command( 343 | f"set persist clone_valid_donor_list='{self.src.host}:3306'" 344 | ) 345 | self.repl.run_command("set persist read_only=0") 346 | try: 347 | self.repl.run_command( 348 | f"CLONE INSTANCE FROM '{self.src.user}'@'{self.src.host}':3306 IDENTIFIED BY '{self.src.password}'" 349 | ) 350 | except OperationalError as o: 351 | self._log(str(o)) 352 | 353 | # TODO: do not continue if this 354 | if self.is_server_up(self.repl, retry=retry): 355 | self.start_mysql_replication() 356 | 357 | def start(self): 358 | self._log("Starting cluster setup...") 359 | self.check_servers_up(retry=10) 360 | 361 | self._log("Initializing config of servers...") 362 | if self.cluster_data_handler.get_cluster_state() == MysqlClusterState.NEW.value: 363 | self.config_src_initial_setup() 364 | ## TODO: what if we restart before writing cluster data? 
365 | self.cluster_data_handler.update_cluster_state(MysqlClusterState.CREATED.value) 366 | 367 | -------------------------------------------------------------------------------- /mysql_manager/cluster_data_handler.py: -------------------------------------------------------------------------------- 1 | from mysql_manager.dto import ( 2 | MysqlClusterState, 3 | ClusterStatus, 4 | MysqlData, 5 | ClusterData, 6 | ) 7 | from mysql_manager.enums import MysqlRoles 8 | from mysql_manager.etcd import EtcdClient 9 | from dataclasses import asdict 10 | 11 | from mysql_manager.exceptions import ( 12 | FailIntervalLessThanMinimumError, 13 | MysqlNodeAlreadyExists, 14 | MysqlNodeDoesNotExist, 15 | SourceDatabaseCannotBeDeleted, 16 | ) 17 | 18 | from mysql_manager.constants import * 19 | 20 | 21 | class ClusterDataHandler: 22 | def __init__(self) -> None: 23 | self.etcd_client = EtcdClient() 24 | 25 | def validate_cluster_data(self): 26 | ## TODO: no more than one source mysqls 27 | ## TODO: more than one mysqls 28 | ## TODO: no more than one replica mysqls 29 | pass 30 | 31 | def is_cluster_data_available(self): 32 | cluster_data = self.etcd_client.read_cluster_data() 33 | return cluster_data is not None 34 | 35 | def write_cluster_data(self, cluster_data: ClusterData): 36 | self.etcd_client.write_cluster_data(asdict(cluster_data)) 37 | 38 | def write_cluster_data_dict(self, cluster_data: dict): 39 | self.etcd_client.write_cluster_data(cluster_data) 40 | 41 | def get_mysqls(self) -> dict: 42 | cluster_data = self.get_cluster_data() 43 | return cluster_data.mysqls 44 | 45 | def add_mysql(self, name: str, mysql_data: dict) -> None: 46 | cluster_data = self.get_cluster_data() 47 | if name in cluster_data.mysqls: 48 | raise MysqlNodeAlreadyExists(name) 49 | cluster_data.mysqls[name] = MysqlData(**mysql_data) 50 | self.write_cluster_data(cluster_data) 51 | 52 | def remove_mysql(self, name: str) -> None: 53 | cluster_data = self.get_cluster_data() 54 | mysqls = cluster_data.mysqls 55 
| if name not in mysqls: 56 | raise MysqlNodeDoesNotExist(name) 57 | if mysqls[name].role == MysqlRoles.SOURCE.value: 58 | raise SourceDatabaseCannotBeDeleted 59 | cluster_data.mysqls.pop(name) 60 | self.write_cluster_data(cluster_data) 61 | 62 | def get_users(self) -> dict: 63 | cluster_data = self.get_cluster_data() 64 | return cluster_data.users 65 | 66 | def get_remote(self) -> MysqlData: 67 | cluster_data = self.get_cluster_data() 68 | return cluster_data.remote 69 | 70 | # def get_proxysql(self) -> dict: 71 | # cluster_data = self.get_cluster_data() 72 | # return cluster_data.proxysqls[0] 73 | 74 | def get_cluster_state(self) -> MysqlClusterState: 75 | cluster_data = self.get_cluster_data() 76 | return cluster_data.status.state 77 | 78 | def set_mysql_role(self, name: str, role: MysqlRoles): 79 | cluster_data = self.get_cluster_data() 80 | cluster_data.mysqls[name].role = role 81 | self.write_cluster_data(cluster_data) 82 | 83 | def update_cluster_state(self, state: MysqlClusterState) -> None: 84 | cluster_data = self.get_cluster_data() 85 | cluster_data.status.state = state 86 | self.write_cluster_data(cluster_data) 87 | 88 | def set_fail_interval(self, fail_interval: int) -> None: 89 | if fail_interval < MINIMUM_FAIL_INTERVAL: 90 | raise FailIntervalLessThanMinimumError 91 | cluster_data = self.get_cluster_data() 92 | cluster_data.fail_interval = fail_interval 93 | self.write_cluster_data(cluster_data) 94 | 95 | def get_fail_interval(self,) -> int: 96 | cluster_data = self.get_cluster_data() 97 | return cluster_data.fail_interval 98 | 99 | def get_cluster_data(self) -> ClusterData: 100 | ## TODO: handle null value of cluster 101 | cluster_data_dict = self.etcd_client.read_cluster_data() 102 | mysqls = {} 103 | for name, mysql in cluster_data_dict["mysqls"].items(): 104 | mysqls[name] = MysqlData(**mysql) 105 | 106 | remote_dict = cluster_data_dict.get("remote") 107 | remote = MysqlData(**remote_dict) if remote_dict is not None else None 108 | 109 | 
cluster_data = ClusterData( 110 | mysqls=mysqls, 111 | users=cluster_data_dict["users"], 112 | status=ClusterStatus(state=cluster_data_dict["status"]["state"]), 113 | remote=remote, 114 | fail_interval=cluster_data_dict.get("fail_interval", MINIMUM_FAIL_INTERVAL) 115 | ) 116 | 117 | return cluster_data 118 | 119 | -------------------------------------------------------------------------------- /mysql_manager/constants.py: -------------------------------------------------------------------------------- 1 | DEFAULT_CONFIG_PATH = "/etc/mm/cluster-spec.yaml" 2 | DEFAULT_DATABASE = "hamdb" 3 | RETRY_WAIT_SECONDS = 10 4 | CLUSTER_CHECK_INTERVAL_SECONDS = 5 5 | CLONE_COMPATIBILITY_CHECK_INTERVAL_SECONDS = 20 6 | MINIMUM_FAIL_INTERVAL = 15 7 | MASTER_FAILURE_THRESHOLD = 3 8 | CLUSTER_STATE_FILE_PATH = "/tmp/cluster-state.yaml" 9 | REMOTE_SOURCE_NAME = "remote_source" 10 | -------------------------------------------------------------------------------- /mysql_manager/dto.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | 3 | from mysql_manager.enums import MysqlClusterState, MysqlRoles 4 | 5 | 6 | @dataclass 7 | class MysqlData: 8 | role: MysqlRoles 9 | host: str 10 | user: str 11 | password: str 12 | port: int = 3306 13 | 14 | 15 | @dataclass 16 | class ClusterStatus: 17 | state: MysqlClusterState 18 | 19 | 20 | ## TODO: define exact dtos for dicts 21 | @dataclass 22 | class ClusterData: 23 | mysqls: dict[str: MysqlData] 24 | remote: MysqlData | None 25 | status: ClusterStatus 26 | users: dict[str: str] 27 | fail_interval: int 28 | 29 | @dataclass 30 | class MysqlPlugin: 31 | name: str 32 | status: str 33 | plugin_type: str 34 | 35 | def __eq__(self, other) -> bool: 36 | return isinstance(other, MysqlPlugin) and self.name == other.name 37 | 38 | def __hash__(self) -> int: 39 | return hash(self.name) 40 | 41 | -------------------------------------------------------------------------------- 
/mysql_manager/enums.py:
--------------------------------------------------------------------------------
from enum import Enum

# Health states reported for a managed server by the cluster manager.
class MysqlStatus(Enum):
    UP = "up"
    DOWN = "down"
    NOT_REPLICA = "not_replica"
    REPLICATION_THREADS_STOPPED = "replication_threads_stopped"
    CLONING_REMOTE = "cloning"
    REPLICATING_REMOTE = "replicating_remote"


# Problems detected while inspecting a replica's replication status.
class MysqlReplicationProblem(Enum):
    IO_THREAD_NOT_RUNNING = "io_thread_not_running"
    SQL_THREAD_NOT_RUNNING = "sql_thread_not_running"
    LAST_ERROR = "last_error" # this is for when there is no IO nor SQL errors
    IO_ERROR = "io_error"
    SQL_ERROR = "sql_error"
    REPLICATION_LAG_HIGH = "replication_lag_high"
    NOT_REPLICA = "not_replica"
    AUTO_POSITION_DISABLED = "auto_position_disabled"
    NO_PROBLEM = "no_problem"


# Server-side configuration preconditions for GTID-based replication.
class MysqlConfigProblem(Enum):
    LOGBIN_NOT_ENABLED = "logbin_not_enabled"
    LOGBIN_FORMAT = "logbin_format"
    GTID_NOT_ENABLED = "gtid_not_enabled"
    GTID_CONSISTENCY_NOT_ENABLED = "gtid_consistency_not_enabled"
    NO_PROBLEM = "no_problem"


# Lifecycle of the whole cluster document stored in etcd.
class MysqlClusterState(Enum):
    CREATED = "created"
    NEW = "new"
    STANDBY = "standby"


# Role a mysql node plays inside the cluster document.
class MysqlRoles(Enum):
    SOURCE = "source"
    REPLICA = "replica"
    READONLY_REPLICA = "readonly_replica"

# Values of PLUGIN_STATUS in INFORMATION_SCHEMA.PLUGINS.
class PluginStatus(Enum):
    ACTIVE = "ACTIVE"
    INACTIVE = "INACTIVE"
    DISABLED = "DISABLED"
    DELETING = "DELETING"
    DELETED = "DELETED"

--------------------------------------------------------------------------------
/mysql_manager/etcd.py:
--------------------------------------------------------------------------------
from etcd3 import Client
import os
import yaml

class EtcdClient:
    def __init__(self) -> None:
        self.client = self.create_etcd_client()

    def create_etcd_client(self):
        # connection parameters come from the environment
        etcd_host = os.getenv("ETCD_HOST")
        etcd_port = os.getenv("ETCD_PORT", "2379")
12 | etcd_username = os.getenv("ETCD_USERNAME") 13 | etcd_password = os.getenv("ETCD_PASSWORD") 14 | self.etcd_prefix = os.getenv("ETCD_PREFIX") 15 | client = Client( 16 | host=etcd_host, 17 | username=etcd_username, 18 | password=etcd_password, 19 | port=int(etcd_port) 20 | ) 21 | return client 22 | 23 | def write_cluster_data(self, cluster_data: dict): 24 | self.write(yaml.safe_dump(cluster_data), path="cluster_data") 25 | 26 | def read_cluster_data(self) -> dict: 27 | cluster_data = self.read(path="cluster_data") 28 | if cluster_data is not None: 29 | return yaml.safe_load(cluster_data.decode()) 30 | 31 | def write_spec(self, spec: dict) -> None: 32 | self.write(yaml.safe_dump(spec), path="spec") 33 | 34 | def write_status(self, status: dict) -> None: 35 | self.write(yaml.safe_dump(status), path="status") 36 | 37 | def read_spec(self) -> dict: 38 | spec = self.read(path="spec") 39 | if spec is not None: 40 | return yaml.safe_load(spec.decode()) 41 | 42 | def read_status(self) -> dict: 43 | status = self.read(path="status") 44 | if status is not None: 45 | return yaml.safe_load(status.decode()) 46 | 47 | def write(self, message: str, path: str) -> None: 48 | self.client.auth() 49 | self.client.put(self.etcd_prefix + path, message) 50 | 51 | def read(self, path: str) -> bytes: 52 | self.client.auth() 53 | values = self.client.range(self.etcd_prefix + path).kvs 54 | if values is None or len(values) == 0: 55 | return None 56 | 57 | return values[0].value 58 | -------------------------------------------------------------------------------- /mysql_manager/exceptions/__init__.py: -------------------------------------------------------------------------------- 1 | from mysql_manager.exceptions.exceptions import * 2 | -------------------------------------------------------------------------------- /mysql_manager/exceptions/exceptions.py: -------------------------------------------------------------------------------- 1 | from mysql_manager.constants import * 2 | 3 | 4 | 
class MysqlClusterConfigError(Exception): 5 | def __init__(self) -> None: 6 | super().__init__( 7 | """ 8 | Mysql cluster config is not correct. At least one mysql and one proxysql is needed 9 | """ 10 | ) 11 | 12 | class ProgramKilled(Exception): 13 | def __init__(self) -> None: 14 | super().__init__("Got signal from OS to stop") 15 | 16 | class MysqlConnectionException(Exception): 17 | def __init__(self) -> None: 18 | super().__init__("Could not connect to MySQL") 19 | 20 | 21 | class MysqlReplicationException(Exception): 22 | def __init__(self) -> None: 23 | super().__init__("Could not start MySQL replication") 24 | 25 | 26 | class MysqlAddPITREventException(Exception): 27 | def __init__(self) -> None: 28 | super().__init__("Could not add PITR Event to Mysql") 29 | 30 | class MysqlNodeDoesNotExist(Exception): 31 | def __init__(self, name: str) -> None: 32 | super().__init__(f"Could not remove {name} node") 33 | 34 | class MysqlNodeAlreadyExists(Exception): 35 | def __init__(self, name: str) -> None: 36 | super().__init__(f"Could not add {name} node") 37 | 38 | class SourceDatabaseCannotBeDeleted(Exception): 39 | def __init__(self) -> None: 40 | super().__init__("Could not remove master database") 41 | 42 | class VariableIsNotSetInDatabase(Exception): 43 | def __init__(self, variable_name: str) -> None: 44 | super().__init__( 45 | f"Variable {variable_name} is not set in database" 46 | ) 47 | 48 | class FailIntervalLessThanMinimumError(Exception): 49 | def __init__(self) -> None: 50 | super().__init__( 51 | f"Variable fail_interval could not be less than {MINIMUM_FAIL_INTERVAL}" 52 | ) 53 | -------------------------------------------------------------------------------- /mysql_manager/helpers/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hamravesh/mysql-manager/02300bce2f1694a094d600960cec0fc3d3947775/mysql_manager/helpers/__init__.py 
--------------------------------------------------------------------------------
/mysql_manager/helpers/clone_compatibility_checker.py:
--------------------------------------------------------------------------------
import logging
import datetime
from mysql_manager.instance import Mysql
from mysql_manager.enums import PluginStatus


# NOTE(review): module logger appears unused; _log() prints instead — confirm
logger = logging.getLogger(__name__)

class CloneCompatibilityChecker:
    """Pre-flight checks before running CLONE INSTANCE from a remote donor."""

    # minimum max_allowed_packet (bytes) required on both servers
    MINIMUM_MAX_ALLOWED_PACKET = 2097152
    # variables that must match between donor and recipient for clone to work
    MUST_BE_THE_SAME_VARIABLES = [
        "innodb_page_size",
        "innodb_data_file_path",
        "character_set_database",
        "collation_database"
    ]

    def __init__(self, src: Mysql, remote: Mysql) -> None:
        # src: the recipient server; remote: the donor being cloned from
        self.src = src
        self.remote = remote

    def _log(self, msg) -> None:
        print(str(datetime.datetime.now()) + " " + msg)

    def are_required_plugins_installed_on_src(self) -> bool:
        # every plugin ACTIVE on the donor must also be ACTIVE on the recipient;
        # MysqlPlugin hashes by name, so set difference compares names only
        src_active_plugins = self.src.get_plugins(status=PluginStatus.ACTIVE.value)
        remote_active_plugins = self.remote.get_plugins(status=PluginStatus.ACTIVE.value)
        required_plugins_on_src = remote_active_plugins - src_active_plugins
        if required_plugins_on_src:
            required_plugin_names=[
                plugin.name for plugin in required_plugins_on_src
            ]
            self._log(f"These plugins should be installed: {required_plugin_names}")
            return False
        return True

    def are_required_variables_matching(self) -> bool:
        """
        Checks if the required MySQL variables are the same between the source and remote databases.

        This function iterates through a predefined list of MySQL variables that must have identical
        values in both the source and remote databases. If any variable's value does not match between
        the two databases, it logs an error message and returns False. If all variables match, it returns True.

        Returns:
            bool: True if all required variables match between the source and remote databases, False otherwise.
47 | """ 48 | for variable in self.MUST_BE_THE_SAME_VARIABLES: 49 | value_in_src = self.src.get_global_variable(variable) 50 | value_in_remote = self.remote.get_global_variable(variable) 51 | if value_in_src != value_in_remote: 52 | self._log(f"Variable {variable} must be the same in source and remote. Source value = {value_in_src}, remote value = {value_in_remote}") 53 | return False 54 | return True 55 | 56 | def is_max_packet_size_valid(self) -> bool: 57 | src_max_allowed_packet = int(self.src.get_global_variable("max_allowed_packet")) 58 | remote_max_allowed_packet = int(self.remote.get_global_variable("max_allowed_packet")) 59 | if src_max_allowed_packet < self.MINIMUM_MAX_ALLOWED_PACKET: 60 | self._log(f"Variable max_allowed_packet has wrong value in source database. It should be more than {self.MINIMUM_MAX_ALLOWED_PACKET} bytes, while current value is {src_max_allowed_packet} bytes.") 61 | return False 62 | if remote_max_allowed_packet < self.MINIMUM_MAX_ALLOWED_PACKET: 63 | self._log(f"Variable max_allowed_packet has wrong value in remote database. It should be more than {self.MINIMUM_MAX_ALLOWED_PACKET} bytes, while current value is {remote_max_allowed_packet} bytes.") 64 | return False 65 | return True 66 | 67 | def is_password_length_valid(self) -> bool: 68 | if len(self.remote.password) > 32: 69 | self._log("The length of replication password should be less than 32") 70 | return False 71 | return True 72 | 73 | def is_clone_possible(self) -> bool: 74 | return all( 75 | ( 76 | self.is_password_length_valid(), 77 | self.is_max_packet_size_valid(), 78 | self.are_required_plugins_installed_on_src(), 79 | self.are_required_variables_matching() 80 | ) 81 | ) 82 | -------------------------------------------------------------------------------- /mysql_manager/helpers/query_builder.py: -------------------------------------------------------------------------------- 1 | class QueryBuilder: 2 | """ 3 | Constructs an SQL query with optional WHERE conditions. 
4 | 5 | This function takes a base SQL query and any number of optional keyword 6 | arguments representing WHERE conditions. It appends these conditions to 7 | the base query, using the format "key = %s". If a condition value is None, 8 | it is skipped. 9 | 10 | Args: 11 | base_query (str): The base SQL query to which conditions will be appended. 12 | **where_conditions: Arbitrary keyword arguments representing WHERE conditions. 13 | Each key is the column name, and each value is the value to filter by. 14 | 15 | Returns: 16 | tuple: A tuple containing the constructed query (str) and a list of arguments (list). 17 | The query is ready to be executed with a database cursor's execute method, 18 | using the arguments list for parameter substitution. 19 | 20 | Example: 21 | >>> base_query = "SELECT * FROM INFORMATION_SCHEMA.PLUGINS" 22 | >>> query, args = build(base_query, name="mysqlx") 23 | >>> print(query) 24 | SELECT * FROM INFORMATION_SCHEMA.PLUGINS WHERE name = %s 25 | >>> print(args) 26 | ['mysqlx'] 27 | """ 28 | 29 | @staticmethod 30 | def build(base_query, **where_conditions): 31 | conditions = [] 32 | args = [] 33 | for key, value in where_conditions.items(): 34 | if value is None: 35 | continue 36 | conditions.append(f"{key} = %s") 37 | args.append(value) 38 | if conditions: 39 | base_query += " WHERE " + " AND ".join(conditions) 40 | return base_query, args 41 | -------------------------------------------------------------------------------- /mysql_manager/instance.py: -------------------------------------------------------------------------------- 1 | import textwrap 2 | 3 | import pymysql 4 | from mysql_manager.enums import ( 5 | MysqlConfigProblem, 6 | MysqlReplicationProblem, 7 | MysqlStatus, 8 | ) 9 | from mysql_manager.helpers.query_builder import QueryBuilder 10 | from mysql_manager.dto import MysqlPlugin 11 | from mysql_manager.exceptions import MysqlConnectionException, MysqlReplicationException, MysqlAddPITREventException, 
VariableIsNotSetInDatabase
from mysql_manager.base import BaseServer
from mysql_manager.constants import DEFAULT_DATABASE

class Mysql(BaseServer):
    """One managed MySQL server (source, replica, or remote donor)."""

    def __init__(self, host: str, user: str, password: str, name: str, role: str, port: int=3306) -> None:
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.name = name
        self.role = role
        # consecutive failed health checks; reset by the cluster manager on success
        self.health_check_failures: int = 0
        # holds the enum's .value string (e.g. "up"), not the enum member itself
        self.status: str = MysqlStatus.UP.value
        self.replicas: list[Mysql] = []
        self.source: Mysql | None = None

        # self.uptime = -1
        # self.server_id: int = -1
        # self.server_uuid: int = -1
        # self.is_readonly: bool = False
        # self.is_binlog_enabled: bool = True
        # self.binlog_format: str = "row"
        # self.binlog_row_image: str = "full"
        # self.is_replica: bool = False
        # self.is_replica_sql_running: bool = False
        # self.is_replica_io_running: bool = False
        # self.using_gtid: bool = False
        # self.read_binlog_coordinates = None
        # self.exec_binlog_coordinates = None
        # self.seconds_behind_master: int = 0
        # self.executed_gtid_set: str = ""

    def user_exists(self, user: str) -> bool:
        """Return True when SHOW GRANTS succeeds for *user*, False otherwise.

        Raises:
            MysqlConnectionException: when a connection cannot be obtained.
        """
        db = self._get_db()
        if db is None:
            self._log("Could not connect to mysql")
            raise MysqlConnectionException()

        with db:
            with db.cursor() as cursor:
                try:
                    # NOTE(review): user name is interpolated into SQL without
                    # escaping — safe only for trusted, internal user names
                    cursor.execute(f"SHOW GRANTS FOR '{user}'")
                    cursor.fetchone()
                except pymysql.err.OperationalError:
                    # SHOW GRANTS errors when the user does not exist
                    return False
                except Exception as e:
                    self._log(str(e))
                    raise e

        return True

    def change_user_password(self, user: str, password: str):
        """Set a new password for *user* at both '%' and 'localhost' hosts."""
        db = self._get_db()
        if db is None:
            self._log("Could not connect to mysql")
            raise MysqlConnectionException()

        with db:
            with db.cursor() as cursor:
                try:
                    cursor.execute(f"ALTER USER IF EXISTS '{user}'@'%' IDENTIFIED BY '{password}'")
                    cursor.execute(f"ALTER USER IF EXISTS '{user}'@'localhost' IDENTIFIED BY '{password}'")
                    cursor.execute("FLUSH PRIVILEGES")
                except Exception as e:
                    self._log(str(e))
                    raise e

    def create_database(self, name: str):
        """Create database *name* if it does not already exist."""
        db = self._get_db()
        if db is None:
            self._log("Could not connect to mysql")
            raise MysqlConnectionException()

        with db:
            with db.cursor() as cursor:
                try:
                    cursor.execute(f"CREATE DATABASE IF NOT EXISTS {name}")
                    cursor.fetchone()
                except Exception as e:
                    self._log(str(e))
                    raise e

    def create_monitoring_user(self, password: str):
        """Create the 'exporter' user with the minimal grants mysqld_exporter needs."""
        db = self._get_db()
        if db is None:
            self._log("Could not connect to mysql")
            raise MysqlConnectionException()

        with db:
            with db.cursor() as cursor:
                try:
                    cursor.execute(
                        f"CREATE USER IF NOT EXISTS 'exporter'@'%' IDENTIFIED WITH mysql_native_password BY '{password}' WITH MAX_USER_CONNECTIONS 3"
                    )
                    cursor.execute(
                        "GRANT PROCESS, REPLICATION CLIENT ON *.* TO 'exporter'@'%'"
                    )
                    cursor.execute(
                        "GRANT SELECT ON performance_schema.* TO 'exporter'@'%'"
                    )
                    cursor.execute("FLUSH PRIVILEGES")
                except Exception as e:
                    self._log(str(e))
                    raise e

    def create_nonpriv_user(self, user: str, password: str):
        """Create an application user with broad grants but no admin privileges."""
        db = self._get_db()
        if db is None:
            self._log("Could not connect to mysql")
            raise MysqlConnectionException()

        # grant everything, then strip the replication/admin privileges so the
        # user cannot interfere with cluster management
        access_commands = [
            f"GRANT ALL ON *.* TO `{user}`@`%` WITH GRANT OPTION",
            f"REVOKE REPLICATION_SLAVE_ADMIN, CONNECTION_ADMIN, SUPER ON *.* FROM `{user}`@`%`",
        ]
        with db:
            with db.cursor() as cursor:
                try:
                    cursor.execute(
                        f"CREATE USER IF NOT EXISTS '{user}'@'%' IDENTIFIED WITH mysql_native_password BY '{password}'"
                    )

                    for command in access_commands:
                        cursor.execute(command)
                    cursor.execute("FLUSH PRIVILEGES")
                except Exception as e:
                    self._log(str(e))
                    raise e

    def create_new_user(self, user: str, password: str, grants: list[str]):
        """Create *user* (host '%') and apply the given list of grant names."""
        db = self._get_db()
        if db is None:
            self._log("Could not connect to mysql")
            raise MysqlConnectionException()

        with db:
            with db.cursor() as cursor:
                try:
                    cursor.execute(
                        f"CREATE USER IF NOT EXISTS '{user}'@'%' IDENTIFIED WITH mysql_native_password BY '{password}'"
                    )
                    grants_command = ",".join(grants)
                    cursor.execute(
                        f"GRANT {grants_command} ON *.* TO '{user}'@'%'"
                    )
                    cursor.execute("FLUSH PRIVILEGES")
                except Exception as e:
                    self._log(str(e))
                    raise e

    def find_config_problems(self) -> list[MysqlConfigProblem]:
        """Check binlog/GTID settings and return the list of problem values found.

        Returns:
            list: MysqlConfigProblem .value strings; empty when configuration is OK.
        """
        db = self._get_db()
        if db is None:
            self._log("Could not connect to mysql")
            raise MysqlConnectionException()

        with db:
            with db.cursor() as cursor:
                try:
                    cursor.execute('''
                    select @@global.log_bin, @@global.binlog_format, @@global.gtid_mode, @@global.enforce_gtid_consistency
                    ''')
                    result = cursor.fetchone()
                except Exception as e:
                    self._log(str(e))
                    raise e

        problems = []
        if result["@@global.log_bin"] != 1:
            problems.append(MysqlConfigProblem.LOGBIN_NOT_ENABLED.value)
        if result["@@global.binlog_format"] != "ROW":
            problems.append(MysqlConfigProblem.LOGBIN_FORMAT.value)
        if result["@@global.gtid_mode"] != "ON":
            problems.append(MysqlConfigProblem.GTID_NOT_ENABLED.value)
        if result["@@global.enforce_gtid_consistency"] != "ON":
            problems.append(MysqlConfigProblem.GTID_CONSISTENCY_NOT_ENABLED.value)

        return problems

    def is_master_of(self, replica) -> bool:
        """True when *replica*'s SHOW REPLICA STATUS points at this server's host."""
        status = replica.get_replica_status()
        if status is None:
            return False

        if status["Source_Host"] == self.host:
            return True

        return False

def add_replica(self, replica) -> None: 202 | if replica.is_replica() and not self.is_replica() and self.is_master_of(replica): 203 | self.replicas.append(replica) 204 | 205 | def get_master_status(self) -> dict: 206 | return self.run_command("SHOW MASTER STATUS") 207 | 208 | def get_replica_status(self) -> dict: 209 | try: 210 | return self.run_command("SHOW REPLICA STATUS") 211 | except: 212 | return None 213 | 214 | def is_replica(self) -> bool: 215 | ## TODO: what if replica is not available? 216 | return self.get_replica_status() is not None 217 | 218 | def get_gtid_executed(self) -> str: 219 | res = self.run_command("select @@global.gtid_executed as gtid") 220 | return res.get("gtid") 221 | 222 | def restart_replication(self): 223 | db = self._get_db() 224 | if db is None: 225 | self._log("Could not connect to mysql") 226 | raise MysqlConnectionException() 227 | 228 | with db: 229 | with db.cursor() as cursor: 230 | try: 231 | cursor.execute("stop replica") 232 | cursor.execute("start replica") 233 | except Exception as e: 234 | self._log(str(e)) 235 | raise e 236 | 237 | def has_base_gtid_set(self): 238 | db = self._get_db() 239 | if db is None: 240 | self._log("Could not connect to mysql") 241 | raise MysqlConnectionException() 242 | 243 | with db: 244 | with db.cursor() as cursor: 245 | try: 246 | cursor.execute("select @@global.server_uuid as uuid") 247 | result = cursor.fetchone() 248 | uuid = result.get("uuid") 249 | 250 | cursor.execute("select @@global.gtid_executed as gtid") 251 | result = cursor.fetchone() 252 | return f"{uuid}:1-6" == result.get("gtid") 253 | except Exception as e: 254 | self._log(str(e)) 255 | raise e 256 | 257 | def get_global_variable( 258 | self, name: str 259 | ) -> str: 260 | query = f"select @@{name} as '{name}';" 261 | variable_value = self.run_command(query).get(name) 262 | if variable_value is None: 263 | raise VariableIsNotSetInDatabase( 264 | variable_name=name 265 | ) 266 | return variable_value 267 | 268 | def 
get_plugins( 269 | self, name: str | None = None, status: str | None = None 270 | ) -> set[MysqlPlugin]: 271 | """ 272 | Retrieve a list of MySQL plugins with optional filtering by name and status. 273 | 274 | This method constructs a SQL query to fetch plugin information from the 275 | INFORMATION_SCHEMA.PLUGINS table, with optional conditions for plugin 276 | name and status. It then executes the query and returns a list of 277 | MysqlPlugin instances based on the results. 278 | 279 | Args: 280 | name (str | None): Optional. The name of the plugin to filter by. Default is None. 281 | status (str | None): Optional. The status of the plugin to filter by. Default is None. 282 | 283 | Returns: 284 | list[MysqlPlugin]: A list of MysqlPlugin instances representing the plugins 285 | that match the given filters. If no filters are provided, all plugins are returned. 286 | 287 | Example: 288 | >>> plugins = mysql.get_plugins(name="example_plugin", status="ACTIVE") 289 | >>> for plugin in plugins: 290 | >>> print(plugin.name, plugin.status, plugin.plugin_type) 291 | """ 292 | query, args = QueryBuilder.build( 293 | "SELECT * FROM INFORMATION_SCHEMA.PLUGINS", 294 | PLUGIN_NAME=name, 295 | PLUGIN_STATUS=status, 296 | ) 297 | 298 | mysql_plugins = self.fetch(query, args) 299 | return { 300 | MysqlPlugin( 301 | name=plugin["PLUGIN_NAME"], 302 | status=plugin["PLUGIN_STATUS"], 303 | plugin_type=plugin["PLUGIN_TYPE"], 304 | ) 305 | for plugin in mysql_plugins 306 | } 307 | 308 | def install_plugin(self, plugin_name: str, plugin_file: str): 309 | """ 310 | Installs a MySQL plugin if it is not already installed. 311 | 312 | This method checks if a plugin with the given name is already installed. 313 | If the plugin is not found, it constructs and runs the SQL command to 314 | install the plugin using the specified plugin file. 315 | 316 | Args: 317 | plugin_name (str): The name of the plugin to install. 
318 | plugin_file (str): The name of the shared object file containing the plugin. 319 | 320 | Returns: 321 | None 322 | 323 | Example: 324 | >>> mysql.install_plugin("example_plugin", "example_plugin.so") 325 | """ 326 | plugins = self.get_plugins(name=plugin_name) 327 | if plugins: 328 | return 329 | 330 | command = f"INSTALL PLUGIN {plugin_name} SONAME '{plugin_file}'" 331 | self.run_command(command) 332 | 333 | def find_replication_problems(self) -> list[MysqlReplicationProblem]: 334 | status = self.get_replica_status() 335 | if status is None: 336 | return [MysqlReplicationProblem.NOT_REPLICA.value] 337 | 338 | # values of these two can be used in future: 'Replica_IO_State', 'Replica_SQL_Running_State' 339 | ## TODO: check if keys exist in `status` 340 | problems = [] 341 | if status["Replica_IO_Running"] != "Yes": 342 | problems.append(MysqlReplicationProblem.IO_THREAD_NOT_RUNNING.value) 343 | if status["Replica_SQL_Running"] != "Yes": 344 | problems.append(MysqlReplicationProblem.SQL_THREAD_NOT_RUNNING.value) 345 | if status["Last_Errno"] != 0 and status["Last_Error"] != "": 346 | problems.append(MysqlReplicationProblem.LAST_ERROR.value) 347 | if status["Last_IO_Errno"] != 0 and status["Last_IO_Error"] != "": 348 | problems.append(MysqlReplicationProblem.IO_ERROR.value) 349 | if status["Last_SQL_Errno"] != 0 and status["Last_SQL_Error"] != "": 350 | problems.append(MysqlReplicationProblem.SQL_ERROR.value) 351 | if status["Seconds_Behind_Source"] is not None and status["Seconds_Behind_Source"] > 60: 352 | problems.append(MysqlReplicationProblem.REPLICATION_LAG_HIGH.value) 353 | if status["Auto_Position"] != 1: 354 | problems.append(MysqlReplicationProblem.AUTO_POSITION_DISABLED.value) 355 | 356 | return problems 357 | 358 | def set_source(self, source): 359 | if source.is_replica(): 360 | self._log("This server at "+source.host+" is a replica and can not be set as source") 361 | return 362 | 363 | source_cfg_problems = source.find_config_problems() 364 | if 
len(source_cfg_problems) != 0: 365 | self._log("Problem in source at "+source.host+" config: " + str(source_cfg_problems)) 366 | return 367 | 368 | self.source = source 369 | 370 | def set_remote_source(self, remote_source): 371 | source_cfg_problems = remote_source.find_config_problems() 372 | if len(source_cfg_problems) != 0: 373 | self._log("Problem in source at "+remote_source.host+" config: " + str(source_cfg_problems)) 374 | return 375 | 376 | self.source = remote_source 377 | 378 | def _generate_change_master_command(self, repl_user: str, repl_password: str) -> str: 379 | return f""" 380 | CHANGE REPLICATION SOURCE TO SOURCE_HOST='{self.source.host}', 381 | SOURCE_PORT={self.source.port}, 382 | SOURCE_USER='{repl_user}', 383 | SOURCE_PASSWORD='{repl_password}', 384 | SOURCE_CONNECT_RETRY = 60, 385 | SOURCE_RETRY_COUNT = 10, 386 | SOURCE_AUTO_POSITION = 1; 387 | """ 388 | 389 | def reset_replication(self): 390 | db = self._get_db() 391 | if db is None: 392 | self._log("Could not connect to mysql") 393 | raise MysqlConnectionException() 394 | with db: 395 | with db.cursor() as cursor: 396 | try: 397 | cursor.execute("stop replica") 398 | cursor.execute("reset replica all") 399 | cursor.execute("set persist read_only=0") 400 | except Exception as e: 401 | self._log(str(e)) 402 | raise e 403 | 404 | self.source = None 405 | 406 | ## TODO: read orchestrator code 407 | ## TODO: check server ids not equal 408 | def start_replication(self, repl_user: str, repl_password: str): 409 | if self.source is None: 410 | self._log("No master set for this instance") 411 | raise MysqlReplicationException() 412 | 413 | if not self.source.ping(): 414 | self._log("Master not accesible") 415 | raise MysqlReplicationException() 416 | 417 | cfg_problems = self.find_config_problems() 418 | if len(cfg_problems) != 0: 419 | self._log("Problem in config: " + str(cfg_problems)) 420 | raise MysqlReplicationException() 421 | 422 | repl_status = self.get_replica_status() 423 | if repl_status 
def start_replication(self, repl_user: str, repl_password: str):
    """Point this instance at `self.source` and start replication.

    No-op (with a log line) when both replica threads are already
    running. Sets the server read-only before starting the threads.

    Raises:
        MysqlReplicationException: when no source is set, the source is
            unreachable, local config is invalid, or the server rejects
            the replication commands.
        MysqlConnectionException: if a local connection cannot be made.
    """
    if self.source is None:
        self._log("No master set for this instance")
        raise MysqlReplicationException()

    if not self.source.ping():
        # fix: log message typo ("accesible")
        self._log("Master not accessible")
        raise MysqlReplicationException()

    cfg_problems = self.find_config_problems()
    if len(cfg_problems) != 0:
        self._log("Problem in config: " + str(cfg_problems))
        raise MysqlReplicationException()

    repl_status = self.get_replica_status()
    if (
        repl_status is not None
        and repl_status["Replica_IO_Running"] == "Yes"
        and repl_status["Replica_SQL_Running"] == "Yes"
    ):
        self._log("Replication is running")
        return

    db = self._get_db()
    if db is None:
        self._log("Could not connect to mysql")
        raise MysqlConnectionException()

    with db:
        with db.cursor() as cursor:
            try:
                cursor.execute(
                    self._generate_change_master_command(repl_user, repl_password)
                )
                cursor.execute("SET PERSIST READ_ONLY=1")
                cursor.execute("START REPLICA")
            except Exception as e:
                self._log(str(e))
                raise MysqlReplicationException()

def add_pitr_event(self, minute_intervals: int = 15):
    """Create the point-in-time-recovery event that flushes binary logs
    every `minute_intervals` minutes (idempotent via IF NOT EXISTS).

    Raises:
        MysqlConnectionException: if a connection cannot be established.
        MysqlAddPITREventException: when the event cannot be created.
    """
    db = self._get_db()
    if db is None:
        self._log("Could not connect to mysql")
        raise MysqlConnectionException()

    with db:
        with db.cursor() as cursor:
            try:
                cursor.execute("USE mysql;")
                cursor.execute(
                    f"CREATE EVENT IF NOT EXISTS pitr ON SCHEDULE EVERY {minute_intervals} MINUTE DO FLUSH BINARY LOGS;"
                )
            except Exception as e:
                self._log(str(e))
                raise MysqlAddPITREventException()

# --------------------------- mysql_manager/metrics.py ---------------------------
from prometheus_client import Counter, Gauge

# Prometheus Metrics
# Counters
FAILOVER_ATTEMPTS = Counter('mysql_failover_attempts', 'Number of failover attempts made')
REPLICATION_RESTARTS = Counter('mysql_replication_restarts', 'Number of replication restarts on replicas')
CLUSTER_FAILURES = Counter('mysql_cluster_failures', 'Total number of cluster failures (master and replica down)')

# Gauges (labelled by backend host)
mysql_status_labels = ["host"]
MASTER_UP_STATUS = Gauge(
    'mysql_master_up',
    'Current status of the MySQL master (1=up, 0=down)',
    mysql_status_labels,
)
REPLICA_UP_STATUS = Gauge(
    'mysql_replica_up',
    'Current status of the MySQL replica (1=up, 0=down)',
    mysql_status_labels,
)

# --------------------------- mysql_manager/proxysql.py ---------------------------
from mysql_manager.instance import Mysql
from mysql_manager.base import BaseServer
from mysql_manager.exceptions import MysqlConnectionException


## TODO: move mysql related passwords to initialize function
class ProxySQL(BaseServer):
    """Client for a ProxySQL admin interface (connects on port 6032)."""

    def __init__(
        self,
        host: str,
        user: str,
        password: str,
        mysql_user: str,
        mysql_password: str,
        monitor_user: str,
        monitor_password: str,
    ) -> None:
        super().__init__(host, user, password, 6032)
        self.mysql_user = mysql_user
        self.mysql_password = mysql_password
        self.monitor_user = monitor_user
        self.monitor_password = monitor_password
        # fix: was annotated `dict[str: Mysql]` (invalid slice syntax in a
        # type) while the value is a list of backends
        self.backends: list[Mysql] = []

    def add_backend(self, instance: Mysql, read_weight: int = 1, is_writer: bool = False):
        """Add `instance` to the reader hostgroup (and the writer hostgroup
        when `is_writer`), persisting the server list to runtime and disk.

        Raises:
            MysqlConnectionException: if the admin interface is unreachable.
        """
        db = self._get_db()
        if db is None:
            self._log("Could not connect to proxysql")
            raise MysqlConnectionException()

        with db:
            with db.cursor() as cursor:
                try:
                    cursor.execute(f"INSERT INTO mysql_servers(hostgroup_id, hostname, port, weight) VALUES (1,'{instance.host}',3306, {read_weight})")
                    if is_writer:
                        cursor.execute(f"INSERT INTO mysql_servers(hostgroup_id, hostname, port) VALUES (0,'{instance.host}',3306)")
                    cursor.execute("load mysql servers to runtime")
                    cursor.execute("save mysql servers to disk")
                    self.backends.append(instance)
                except Exception as e:
                    self._log(str(e))
                    raise

    def remove_backend(self, instance: Mysql):
        """Remove `instance` from every hostgroup and persist the change.

        Raises:
            MysqlConnectionException: if the admin interface is unreachable.
        """
        db = self._get_db()
        if db is None:
            self._log("Could not connect to proxysql")
            raise MysqlConnectionException()

        with db:
            with db.cursor() as cursor:
                try:
                    cursor.execute(f"delete from mysql_servers where hostname='{instance.host}'")
                    cursor.execute("load mysql servers to runtime")
                    cursor.execute("save mysql servers to disk")
                except Exception as e:
                    self._log(str(e))
                    raise

    def find_backend_problems(self):
        pass  # TODO: not implemented yet

    def find_proxysql_problems(self):
        pass  # TODO: not implemented yet

    def initialize_setup(self):
        """Idempotently configure replication hostgroups, monitor
        credentials and the application user on the admin interface.

        Raises:
            MysqlConnectionException: if the admin interface is unreachable.
        """
        db = self._get_db()
        if db is None:
            self._log("Could not connect to mysql")
            raise MysqlConnectionException()

        with db:
            with db.cursor() as cursor:
                try:
                    ## TODO: delete all old servers and configs
                    cursor.execute("select * from mysql_replication_hostgroups")
                    result = cursor.fetchone()
                    if result is None:
                        cursor.execute("INSERT INTO mysql_replication_hostgroups (writer_hostgroup,reader_hostgroup,comment) VALUES (0,1,'main')")
                        cursor.execute("load mysql servers to runtime")
                        cursor.execute("save mysql servers to disk")
                    cursor.execute(f"UPDATE global_variables SET variable_value='{self.monitor_user}' WHERE variable_name='mysql-monitor_username'")
                    cursor.execute(f"UPDATE global_variables SET variable_value='{self.monitor_password}' WHERE variable_name='mysql-monitor_password'")
                    cursor.execute("load mysql variables to runtime")
                    cursor.execute("save mysql variables to disk")
                    cursor.execute("select * from mysql_users")
                    result = cursor.fetchone()
                    if result is None:
                        cursor.execute(f"INSERT INTO mysql_users (username,password) VALUES ('{self.mysql_user}','{self.mysql_password}')")
                        cursor.execute("load mysql users to runtime")
                        cursor.execute("save mysql users to disk")
                    result = cursor.fetchall()
                except Exception as e:
                    self._log(str(e))
                    raise

    def is_configured(self) -> bool:
        """True when the server list can be read from the admin database."""
        servers = None
        try:
            servers = self.run_command("select * from mysql_servers")
        except Exception as e:
            # fix: was `self._log(e)`; pass a string like every other call site
            self._log(str(e))

        return servers is not None

    def split_read_write(self, is_active):
        """Enable or disable the SELECT -> reader-hostgroup query rule."""
        db = self._get_db()
        if db is None:
            # fix: was print(); use the instance logger like the rest of
            # the class, and call the exception class
            self._log("Could not connect to proxysql")
            raise MysqlConnectionException()

        with db:
            with db.cursor() as cursor:
                try:
                    cursor.execute("SELECT * FROM mysql_query_rules;")
                    result = cursor.fetchone()
                    if is_active and result is None:
                        cursor.execute("INSERT INTO mysql_query_rules (active, match_digest, destination_hostgroup, apply) VALUES (1, '^SELECT.*', 1, 0);")
                        result = cursor.fetchone()
                        self._log(str(result))
                    elif not is_active and result is not None:
                        cursor.execute("DELETE FROM mysql_query_rules;")
                        result = cursor.fetchone()
                        self._log(str(result))

                except Exception as e:
                    self._log(str(e))
                    raise

# --------------------------- mysql_manager/tests.py ---------------------------
from mysql_manager.instance import Mysql
from mysql_manager.proxysql import ProxySQL
from mysql_manager.cluster import ClusterManager

def test_normal_info_wrong():
    """Smoke-test connection failures: bad host, bad user, bad password."""
    tests = [
        {"host": "wrong-host", "user": "root", "password": "root"},
        {"host": "test-mysql-s1-svc", "user": "wrong", "password": "root"},
        {"host": "test-mysql-s1-svc", "user": "root", "password": "wrong"},
    ]
    for i in tests:
        print(i)
        inst = Mysql(**i)
        print("pinged: " + str(inst.ping()))
        inst.get_master_status()
        print("config problem: " + str(inst.find_config_problems()))
        print("\n\n")

def test_timeout():
    """Smoke-test pinging a wrong port (connection timeout path)."""
    tests = [
        {"host": "test-mysql-s1-svc", "user": "root", "password": "root", "port": 3304},
    ]
    for i in tests:
        print(i)
        inst = Mysql(**i)
        print("pinged: " + str(inst.ping()))
        print("\n\n")
| {"host": "test-mysql-s1-svc", "user": "root", "password": "root", "port": 3304}, 22 | ] 23 | for i in tests: 24 | print(i) 25 | inst = Mysql(**i) 26 | print("pinged: " + str(inst.ping())) 27 | print("\n\n") 28 | 29 | def test_replication(): 30 | tests = [ 31 | {"host": "test-mysql-s1-svc", "user": "root", "password": "root"}, 32 | {"host": "test-mysql-s2-svc", "user": "root", "password": "root"}, 33 | ] 34 | src = Mysql(**tests[0]) 35 | repl = Mysql(**tests[1]) 36 | src.create_new_user("replica", "replica", ["REPLICATION SLAVE"]) 37 | print("user replica exists: ", str(src.user_exists("replica", []))) 38 | src.add_replica(repl) 39 | repl.set_source(src) 40 | repl.start_replication("replica", "replica") 41 | print("is src replica: " + str(src.is_replica())) 42 | print("is repl replica: " + str(repl.is_replica())) 43 | print("src config problems: " + str(src.find_config_problems())) 44 | print("repl config problems: " + str(repl.find_config_problems())) 45 | print("src replication problems: " + str(src.find_replication_problems())) 46 | print("repl replication problems: " + str(repl.find_replication_problems())) 47 | src.add_replica(repl) 48 | print("src replica: " + src.replicas[0].host) 49 | print("is src master of repl: " + str(src.is_master_of(repl))) 50 | 51 | def test_proxysql(): 52 | tests = [ 53 | {"host": "test-mysql-s1-svc", "user": "root", "password": "root"}, 54 | {"host": "test-mysql-s2-svc", "user": "root", "password": "root"}, 55 | {"host": "test-proxysql-svc", "user": "radmin", "password": "radmin", "mysql_user": "str", 56 | "mysql_password": "str", 57 | "monitor_user": "str", 58 | "monitor_password": "str"}, 59 | ] 60 | px = ProxySQL(**tests[2]) 61 | print("Pinged: " + str(px.ping())) 62 | 63 | def test_cluster_read_config(): 64 | clm = ClusterManager("./tests/config/mm-config-mysql-1.yaml") 65 | assert clm.src.host == "mysql-s1" 66 | assert clm.src.user == "root" 67 | assert clm.src.password == "root" 68 | # print(clm.repl) 69 | assert 
len(clm.proxysqls) == 1 70 | assert clm.proxysqls[0].host == "proxysql" 71 | assert clm.proxysqls[0].user == "radmin" 72 | assert clm.proxysqls[0].password == "pwd" 73 | assert clm.users == { 74 | "replPassword": "password", 75 | "exporterPassword": "exporter", 76 | "nonprivPassword": "password", 77 | "nonprivUser": "dbadmin", 78 | "proxysqlMonPassword": "password", 79 | } 80 | 81 | 82 | if __name__ == "__main__": 83 | # test_normal_info_wrong() 84 | # test_timeout() 85 | test_cluster_read_config() 86 | -------------------------------------------------------------------------------- /pip.conf: -------------------------------------------------------------------------------- 1 | [global] 2 | index-url = https://repo.hsre.ir/artifactory/api/pypi/pypi/simple -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "mysql-manager" 3 | version = "0.2.0" 4 | description = "" 5 | authors = ["aliakbar "] 6 | readme = "README.md" 7 | 8 | [tool.poetry.dependencies] 9 | python = "^3.11" 10 | pymysql = "^1.1.0" 11 | cryptography = "^42.0.8" 12 | click = "^8.1.7" 13 | pyyaml = "^6.0.2" 14 | prometheus-client = "^0.20.0" 15 | etcd3-py = "^0.1.6" 16 | 17 | [build-system] 18 | requires = ["poetry-core"] 19 | build-backend = "poetry.core.masonry.api" 20 | -------------------------------------------------------------------------------- /requirements.test.txt: -------------------------------------------------------------------------------- 1 | behave==1.2.6 2 | testcontainers==4.9.0 3 | xmltodict==0.14.2 4 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | aiohappyeyeballs==2.4.3 ; python_version >= "3.11" and python_version < "4.0" 2 | aiohttp==3.10.9 ; python_version >= "3.11" and python_version < 
"4.0" 3 | aiosignal==1.3.1 ; python_version >= "3.11" and python_version < "4.0" 4 | attrs==24.2.0 ; python_version >= "3.11" and python_version < "4.0" 5 | certifi==2024.8.30 ; python_version >= "3.11" and python_version < "4.0" 6 | cffi==1.16.0 ; python_version >= "3.11" and python_version < "4.0" and platform_python_implementation != "PyPy" 7 | charset-normalizer==3.3.2 ; python_version >= "3.11" and python_version < "4.0" 8 | click==8.1.7 ; python_version >= "3.11" and python_version < "4.0" 9 | colorama==0.4.6 ; python_version >= "3.11" and python_version < "4.0" and platform_system == "Windows" 10 | cryptography==42.0.8 ; python_version >= "3.11" and python_version < "4.0" 11 | etcd3-py==0.1.6 ; python_version >= "3.11" and python_version < "4.0" 12 | frozenlist==1.4.1 ; python_version >= "3.11" and python_version < "4.0" 13 | idna==3.8 ; python_version >= "3.11" and python_version < "4.0" 14 | multidict==6.1.0 ; python_version >= "3.11" and python_version < "4.0" 15 | prometheus-client==0.20.0 ; python_version >= "3.11" and python_version < "4.0" 16 | pycparser==2.22 ; python_version >= "3.11" and python_version < "4.0" and platform_python_implementation != "PyPy" 17 | pymysql==1.1.1 ; python_version >= "3.11" and python_version < "4.0" 18 | pyopenssl==24.2.1 ; python_version >= "3.11" and python_version < "4.0" 19 | pyyaml==6.0.2 ; python_version >= "3.11" and python_version < "4.0" 20 | requests==2.32.3 ; python_version >= "3.11" and python_version < "4.0" 21 | semantic-version==2.10.0 ; python_version >= "3.11" and python_version < "4.0" 22 | six==1.16.0 ; python_version >= "3.11" and python_version < "4.0" 23 | urllib3==2.2.2 ; python_version >= "3.11" and python_version < "4.0" 24 | yarl==1.13.1 ; python_version >= "3.11" and python_version < "4.0" 25 | -------------------------------------------------------------------------------- /scripts/check-servers-up.py: -------------------------------------------------------------------------------- 1 | from 
mysql_manager.instance import Mysql 2 | from mysql_manager.proxysql import ProxySQL 3 | import os, time, json 4 | 5 | MYSQL_S1_HOST = os.getenv("MYSQL_S1_HOST") 6 | MYSQL_S2_HOST = os.getenv("MYSQL_S2_HOST") 7 | MYSQL_ROOT_PASSWORD = os.getenv("MYSQL_ROOT_PASSWORD") 8 | MYSQL_REPL_PASSWORD = os.getenv("MYSQL_REPL_PASSWORD") 9 | MYSQL_EXPORTER_PASSWORD = os.getenv("MYSQL_EXPORTER_PASSWORD") 10 | PROXYSQL_HOST = os.getenv("PROXYSQL_HOST") 11 | PROXYSQL_PASSWORD = os.getenv("PROXYSQL_PASSWORD") 12 | PROXYSQL_MON_PASSWORD = os.getenv("PROXYSQL_MON_PASSWORD") 13 | 14 | src = Mysql(MYSQL_S1_HOST, "root", MYSQL_ROOT_PASSWORD) 15 | repl = Mysql(MYSQL_S2_HOST, "root", MYSQL_ROOT_PASSWORD) 16 | px = ProxySQL( 17 | PROXYSQL_HOST, 18 | "radmin", 19 | PROXYSQL_PASSWORD, 20 | "root", 21 | MYSQL_ROOT_PASSWORD, 22 | "proxysql", 23 | PROXYSQL_MON_PASSWORD 24 | ) 25 | 26 | status = {"src": "down", "repl": "down", "proxy": "down"} 27 | src_ping = src.ping() 28 | repl_ping = repl.ping() 29 | proxy_ping = px.ping() 30 | src_command = src.run_command("select 1") 31 | repl_command = repl.run_command("select 1") 32 | proxy_command = px.run_command("select 1") 33 | # print(proxy_ping, proxy_command) 34 | 35 | if src_ping and src_command.get("1", "0") == 1: 36 | status["src"] = "up" 37 | if repl_ping and repl_command.get("1", "0") == 1: 38 | status["repl"] = "up" 39 | if proxy_ping and proxy_command.get("1", "0") == "1": 40 | status["proxy"] = "up" 41 | 42 | print(json.dumps(status)) 43 | -------------------------------------------------------------------------------- /scripts/start-replication-cli.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #export MYSQL_S1_HOST=mysql-s1 4 | #export MYSQL_S2_HOST=mysql-s2 5 | 6 | echo -e "\nadd mysql instance: " 7 | python /app/cli/mysql-cli.py mysql add -n mysql-s1-instance -h $MYSQL_S1_HOST -u root -p root 8 | python /app/cli/mysql-cli.py mysql add -n mysql-s2-instance -h $MYSQL_S2_HOST -u 
root -p root 9 | echo -e "\nDone" 10 | 11 | echo -e "\nping mysql: " 12 | python /app/cli/mysql-cli.py mysql ping -n mysql-s1-instance 13 | 14 | echo -e "\nget-info mysql: " 15 | python /app/cli/mysql-cli.py mysql get-info -n mysql-s1-instance -c "select 1" 16 | 17 | echo -e "\ncreate-user: " 18 | python /app/cli/mysql-cli.py mysql create-user -n mysql-s1-instance --user replica-user --password repl --roles "REPLICATION SLAVE" 19 | 20 | echo -e "\nadd-replica: " 21 | python /app/cli/mysql-cli.py mysql add-replica --master mysql-s1-instance --replica mysql-s2-instance 22 | 23 | echo -e "\nstart-replication: " 24 | python /app/cli/mysql-cli.py mysql start-replication --master mysql-s1-instance --replica mysql-s2-instance --repl-user replica-user --repl-password repl 25 | 26 | echo -e "\ncreate-monitoring-user: " 27 | python /app/cli/mysql-cli.py mysql create-monitoring-user -n mysql-s1-instance --password exporter 28 | -------------------------------------------------------------------------------- /scripts/start-replication-with-proxysql-cli.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #export MYSQL_S1_HOST=mysql-s1 4 | #export MYSQL_S2_HOST=mysql-s2 5 | 6 | 7 | echo -e "\nadd mysql instance: " 8 | python /app/cli/mysql-cli.py mysql add -n mysql-s1-instance -h $MYSQL_S1_HOST -u root -p root 9 | python /app/cli/mysql-cli.py mysql add -n mysql-s2-instance -h $MYSQL_S2_HOST -u root -p root 10 | echo -e "\nDone" 11 | 12 | echo -e "\nping mysql: " 13 | python /app/cli/mysql-cli.py mysql ping -n mysql-s1-instance 14 | 15 | echo -e "\nget-info mysql: " 16 | python /app/cli/mysql-cli.py mysql get-info -n mysql-s1-instance -c "select 1" 17 | 18 | echo -e "\ncreate-user: " 19 | python /app/cli/mysql-cli.py mysql create-user -n mysql-s1-instance --user replica-user --password repl --roles "REPLICATION SLAVE" 20 | 21 | echo -e "\nadd-replica: " 22 | python /app/cli/mysql-cli.py mysql add-replica --master 
mysql-s1-instance --replica mysql-s2-instance 23 | 24 | echo -e "\nstart-replication: " 25 | python /app/cli/mysql-cli.py mysql start-replication --master mysql-s1-instance --replica mysql-s2-instance --repl-user replica-user --repl-password repl 26 | 27 | echo -e "\ncreate-monitoring-user: " 28 | python /app/cli/mysql-cli.py mysql create-monitoring-user -n mysql-s1-instance --password exporter 29 | 30 | echo -e "\nproxysql operations: " 31 | python /app/cli/mysql-cli.py mysql create-user -n mysql-s1-instance --user proxysql --password pass --roles "USAGE,REPLICATION CLIENT" 32 | 33 | echo -e "\nset pitr event: " 34 | python /app/cli/mysql-cli.py mysql add-pitr-event -n mysql-s1-instance -i 15 35 | 36 | echo -e "\nSleeping for 10 seconds..." 37 | sleep 10 38 | 39 | echo -e "\nproxysql add: " 40 | python /app/cli/mysql-cli.py proxysql add -n proxysql-instance -h proxysql -u radmin -p pwd 41 | 42 | echo -e "\nproxysql initialize: " 43 | python /app/cli/mysql-cli.py proxysql initialize -n proxysql-instance --mysql-user root --mysql-password root --monitor-user exporter --monitor-password exporter 44 | 45 | echo -e "\nproxysql add-backend: " 46 | python /app/cli/mysql-cli.py proxysql add-backend --mysql-name mysql-s1-instance --proxysql-name proxysql-instance --read-weight 1 --is-writer 47 | python /app/cli/mysql-cli.py proxysql add-backend --mysql-name mysql-s2-instance --proxysql-name proxysql-instance --read-weight 1 -------------------------------------------------------------------------------- /scripts/start-replication-with-proxysql.py: -------------------------------------------------------------------------------- 1 | from mysql_manager.instance import Mysql 2 | from mysql_manager.proxysql import ProxySQL 3 | import os, time 4 | 5 | 6 | MYSQL_S1_HOST = os.getenv("MYSQL_S1_HOST") 7 | MYSQL_S2_HOST = os.getenv("MYSQL_S2_HOST") 8 | MYSQL_ROOT_PASSWORD = os.getenv("MYSQL_ROOT_PASSWORD") 9 | MYSQL_REPL_PASSWORD = os.getenv("MYSQL_REPL_PASSWORD") 10 | 
MYSQL_EXPORTER_PASSWORD = os.getenv("MYSQL_EXPORTER_PASSWORD") 11 | PROXYSQL_HOST = os.getenv("PROXYSQL_HOST") 12 | PROXYSQL_PASSWORD = os.getenv("PROXYSQL_PASSWORD") 13 | PROXYSQL_MON_PASSWORD = os.getenv("PROXYSQL_MON_PASSWORD") 14 | 15 | src = Mysql(MYSQL_S1_HOST, "root", MYSQL_ROOT_PASSWORD) 16 | repl = Mysql(MYSQL_S2_HOST, "root", MYSQL_ROOT_PASSWORD) 17 | src.create_new_user("replica", MYSQL_REPL_PASSWORD, ["REPLICATION SLAVE"]) 18 | src.add_replica(repl) 19 | repl.set_source(src) 20 | repl.start_replication("replica", MYSQL_REPL_PASSWORD) 21 | 22 | ## create monitoring user 23 | src.create_monitoring_user(MYSQL_EXPORTER_PASSWORD) 24 | 25 | src.create_new_user("proxysql", PROXYSQL_MON_PASSWORD, ["USAGE", "REPLICATION CLIENT"]) 26 | time.sleep(5) 27 | px = ProxySQL( 28 | PROXYSQL_HOST, 29 | "radmin", 30 | PROXYSQL_PASSWORD, 31 | "root", 32 | MYSQL_ROOT_PASSWORD, 33 | "proxysql", 34 | PROXYSQL_MON_PASSWORD 35 | ) 36 | px.initialize_setup() 37 | px.add_backend(src, 1, True) 38 | px.add_backend(repl, 1, False) 39 | -------------------------------------------------------------------------------- /scripts/start-replication.py: -------------------------------------------------------------------------------- 1 | from mysql_manager.instance import Mysql 2 | from mysql_manager.proxysql import ProxySQL 3 | import os 4 | 5 | 6 | MYSQL_S1_HOST = os.getenv("MYSQL_S1_HOST") 7 | MYSQL_S2_HOST = os.getenv("MYSQL_S2_HOST") 8 | MYSQL_ROOT_PASSWORD = os.getenv("MYSQL_ROOT_PASSWORD") 9 | MYSQL_REPL_PASSWORD = os.getenv("MYSQL_REPL_PASSWORD") 10 | MYSQL_EXPORTER_PASSWORD = os.getenv("MYSQL_EXPORTER_PASSWORD") 11 | 12 | src = Mysql(MYSQL_S1_HOST, "root", MYSQL_ROOT_PASSWORD) 13 | repl = Mysql(MYSQL_S2_HOST, "root", MYSQL_ROOT_PASSWORD) 14 | src.create_new_user("replica", MYSQL_REPL_PASSWORD, ["REPLICATION SLAVE"]) 15 | src.add_replica(repl) 16 | repl.set_source(src) 17 | repl.start_replication("replica", MYSQL_REPL_PASSWORD) 18 | 19 | ## create monitoring user 20 | 
src.create_monitoring_user(MYSQL_EXPORTER_PASSWORD) 21 | -------------------------------------------------------------------------------- /scripts/start-simple-with-proxysql-cli.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #export MYSQL_S1_HOST=mysql-s1 4 | #export MYSQL_S2_HOST=mysql-s2 5 | 6 | echo -e "\nadd mysql instance: " 7 | python /app/cli/mysql-cli.py mysql add -n mysql-s1-instance -h $MYSQL_S1_HOST -u root -p root 8 | echo -e "\nDone" 9 | 10 | echo -e "\nping mysql: " 11 | python /app/cli/mysql-cli.py mysql ping -n mysql-s1-instance 12 | 13 | echo -e "\nget-info mysql: " 14 | python /app/cli/mysql-cli.py mysql get-info -n mysql-s1-instance -c "select 1" 15 | 16 | echo -e "\ncreate-user: " 17 | python /app/cli/mysql-cli.py mysql create-user -n mysql-s1-instance --user replica-user --password repl --roles "REPLICATION SLAVE" 18 | 19 | echo -e "\ncreate-monitoring-user: " 20 | python /app/cli/mysql-cli.py mysql create-monitoring-user -n mysql-s1-instance --password exporter 21 | 22 | echo -e "\nproxysql operations: " 23 | python /app/cli/mysql-cli.py mysql create-user -n mysql-s1-instance --user proxysql --password pass --roles "USAGE,REPLICATION CLIENT" 24 | 25 | echo -e "\nproxysql add: " 26 | python /app/cli/mysql-cli.py proxysql add -n proxysql-instance -h proxysql -u radmin -p pwd 27 | 28 | echo -e "\nproxysql initialize: " 29 | python /app/cli/mysql-cli.py proxysql initialize -n proxysql-instance --mysql-user root --mysql-password root --monitor-user exporter --monitor-password exporter 30 | 31 | echo -e "\nproxysql add-backend: " 32 | python /app/cli/mysql-cli.py proxysql add-backend --mysql-name mysql-s1-instance --proxysql-name proxysql-instance --read-weight 1 --is-writer 33 | -------------------------------------------------------------------------------- /scripts/start-simple-with-proxysql.py: -------------------------------------------------------------------------------- 1 | from 
mysql_manager.instance import Mysql 2 | from mysql_manager.proxysql import ProxySQL 3 | import os, time 4 | 5 | 6 | MYSQL_S1_HOST = os.getenv("MYSQL_S1_HOST") 7 | # MYSQL_S2_HOST = os.getenv("MYSQL_S2_HOST") 8 | MYSQL_ROOT_PASSWORD = os.getenv("MYSQL_ROOT_PASSWORD") 9 | MYSQL_EXPORTER_PASSWORD = os.getenv("MYSQL_EXPORTER_PASSWORD") 10 | # MYSQL_REPL_PASSWORD = os.getenv("MYSQL_REPL_PASSWORD") 11 | PROXYSQL_HOST = os.getenv("PROXYSQL_HOST") 12 | PROXYSQL_PASSWORD = os.getenv("PROXYSQL_PASSWORD") 13 | PROXYSQL_MON_PASSWORD = os.getenv("PROXYSQL_MON_PASSWORD") 14 | 15 | src = Mysql(MYSQL_S1_HOST, "root", MYSQL_ROOT_PASSWORD) 16 | src.create_monitoring_user(MYSQL_EXPORTER_PASSWORD) 17 | 18 | src.create_new_user("proxysql", PROXYSQL_MON_PASSWORD, ["USAGE", "REPLICATION CLIENT"]) 19 | time.sleep(5) 20 | px = ProxySQL( 21 | PROXYSQL_HOST, 22 | "radmin", 23 | PROXYSQL_PASSWORD, 24 | "root", 25 | MYSQL_ROOT_PASSWORD, 26 | "proxysql", 27 | PROXYSQL_MON_PASSWORD 28 | ) 29 | px.initialize_setup() 30 | px.add_backend(src, 1, True) 31 | # px.add_backend(repl, 1, False) 32 | -------------------------------------------------------------------------------- /tests/cli-mysql-1-proxysql.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source ./setup-etcd.sh 4 | 5 | echo "Creating servers..." 6 | docker compose down 7 | docker rm -f mm 8 | 9 | docker compose up -d 10 | setup_user 11 | docker build ./../ -t mysql-manager:latest 12 | docker run -d \ 13 | -v ./config/mm-config-mysql-1.yaml:/etc/mm/cluster-spec.yaml \ 14 | --network mysql-manager_default --name mm \ 15 | -e ETCD_HOST=etcd -e ETCD_USERNAME=mm -e ETCD_PASSWORD=password -e ETCD_PREFIX=mm/cluster1/ \ 16 | -p 8000:8000 mysql-manager:latest 17 | docker exec mm python cli/mysql-cli.py init -f /etc/mm/cluster-spec.yaml 18 | sleep 30 19 | 20 | 21 | echo -e "\n\nCreating db through proxysql..." 
22 | docker compose exec mysql-s1 mysql -uhamadmin -ppassword -h proxysql -e "use hamdb; CREATE TABLE t1 (c1 INT PRIMARY KEY, c2 TEXT NOT NULL);INSERT INTO t1 VALUES (1, 'Luis');" 23 | sleep 5 24 | 25 | echo -e "\n\nChecking through proxysql..." 26 | docker compose exec mysql-s1 mysql -uhamadmin -ppassword -h proxysql -e "select * from hamdb.t1;" 27 | 28 | echo -e "\n\nChecking master..." 29 | docker compose exec mysql-s1 mysql -uroot -proot -e "select * from hamdb.t1;" 30 | 31 | echo -e "\n\nChecking events in master..." 32 | docker compose exec mysql-s1 mysql -uroot -proot -e "USE mysql; SHOW EVENTS;" 33 | 34 | echo -e "\n\nChecking default user..." 35 | docker compose exec mysql-s1 mysql -uroot -proot -e "SELECT user FROM mysql.user" 36 | docker compose exec mysql-s1 mysql -uroot -proot -e "show grants for hamadmin" 37 | 38 | echo -e "\n\nChecking default database..." 39 | docker compose exec mysql-s1 mysql -uroot -proot -e "show databases" 40 | 41 | echo -e "\n\nChecking proxysql config and stats..." 42 | sleep 10 43 | docker compose exec mysql-s1 mysql -uradmin -ppwd -h proxysql -P6032 -e "select * from runtime_mysql_servers;" 44 | docker compose exec mysql-s1 mysql -uradmin -ppwd -h proxysql -P6032 -e "SELECT * FROM monitor.mysql_server_connect_log ORDER BY time_start_us DESC LIMIT 6" 45 | docker compose exec mysql-s1 mysql -uradmin -ppwd -h proxysql -P6032 -e "select Queries, srv_host from stats_mysql_connection_pool\G" 46 | docker compose exec mysql-s1 mysql -uradmin -ppwd -h proxysql -P6032 -e "select * from stats_mysql_query_rules" 47 | 48 | 49 | echo -e "\n\nChecking metrics from exporter..." 50 | curl localhost:9105/metrics | grep mysql_up 51 | 52 | echo -e "\n\nTesting cluster status..." 53 | docker compose down mysql-s1 54 | sleep 5 55 | docker exec mm python /app/cli/mysql-cli.py mysql get-cluster-status 56 | 57 | echo -e "\n\nDestroying servers..." 
58 | sleep 5 59 | docker compose down 60 | docker rm -f mm 61 | -------------------------------------------------------------------------------- /tests/cli-mysql-1-to-2-proxysql.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source ./setup-etcd.sh 4 | 5 | echo "Creating servers..." 6 | docker compose down 7 | docker rm -f mm 8 | 9 | docker compose up -d 10 | setup_user 11 | docker build ./../ -t mysql-manager:latest 12 | docker run -d \ 13 | -v ./config/mm-config-mysql-1.yaml:/etc/mm/cluster-spec.yaml \ 14 | --network mysql-manager_default --name mm \ 15 | -e ETCD_HOST=etcd -e ETCD_USERNAME=mm -e ETCD_PASSWORD=password -e ETCD_PREFIX=mm/cluster1/ \ 16 | -p 8000:8000 mysql-manager:latest 17 | docker exec mm python cli/mysql-cli.py init -f /etc/mm/cluster-spec.yaml 18 | sleep 30 19 | 20 | echo -e "\n\nCreating db through proxysql..." 21 | docker compose exec mysql-s1 mysql -uhamadmin -ppassword -h proxysql -e "use hamdb; CREATE TABLE t1 (c1 INT PRIMARY KEY, c2 TEXT NOT NULL);INSERT INTO t1 VALUES (1, 'Luis');" 22 | sleep 5 23 | 24 | echo -e "\n\nWrite data through proxysql..." 25 | docker compose exec mysql-s1 bash -c "for i in {1..1000}; do mysql -uhamadmin -ppassword -h proxysql hamdb -e 'insert into t1 values(floor(rand()*100000000), curdate())' 2>/dev/null; done" 26 | 27 | echo -e "\n\nChecking through proxysql..." 28 | docker compose exec mysql-s1 mysql -uhamadmin -ppassword -h proxysql -e "select count(*) from hamdb.t1;" 29 | 30 | echo -e "\n\nChecking master..." 31 | docker compose exec mysql-s1 mysql -uroot -proot -e "select count(*) from hamdb.t1;" 32 | 33 | echo -e "\n\nPurge master binary logs..." 34 | docker compose exec mysql-s1 mysql -uroot -proot -e "purge binary logs before now()" 35 | 36 | echo -e "\n\nAdding replica to master..." 
37 | # docker rm -f mm 38 | # docker run -d \ 39 | # -v ./config/mm-config-mysql-2.yaml:/etc/mm/cluster-spec.yaml \ 40 | # -e ETCD_HOST=etcd -e ETCD_USERNAME=mm -e ETCD_PASSWORD=password -e ETCD_PREFIX=mm/cluster1/ \ 41 | # --network mysql-manager_default --name mm mysql-manager:latest 42 | docker exec mm python cli/mysql-cli.py add -h mysql-s2 -u root -p root -n s2 43 | sleep 30 44 | 45 | echo -e "\n\nChecking new replica..." 46 | sleep 10 47 | docker compose exec mysql-s2 mysql -uroot -proot -e "select count(*) from hamdb.t1;" 48 | docker compose exec mysql-s2 mysql -uroot -proot -e "show replica status\G" 49 | 50 | echo -e "\n\nChecking proxysql config and stats..." 51 | sleep 10 52 | docker compose exec mysql-s1 mysql -uradmin -ppwd -h proxysql -P6032 -e "select * from runtime_mysql_servers;" 53 | docker compose exec mysql-s1 mysql -uradmin -ppwd -h proxysql -P6032 -e "SELECT * FROM monitor.mysql_server_connect_log ORDER BY time_start_us DESC LIMIT 6" 54 | docker compose exec mysql-s1 mysql -uradmin -ppwd -h proxysql -P6032 -e "select Queries, srv_host from stats_mysql_connection_pool\G" 55 | docker compose exec mysql-s1 mysql -uradmin -ppwd -h proxysql -P6032 -e "select * from stats_mysql_query_rules" 56 | 57 | echo -e "\n\nTest persisted variables..." 58 | docker compose exec mysql-s2 mysql -uroot -proot -e "select @@global.super_read_only" 59 | docker compose exec mysql-s2 mysql -uroot -proot -e "select @@global.read_only" 60 | 61 | echo -e "\n\nTesting cluster status..." 62 | docker exec mm python /app/cli/mysql-cli.py mysql get-cluster-status 63 | 64 | echo -e "\n\nDestroying servers..." 65 | sleep 5 66 | docker compose down 67 | docker rm -f mm 68 | -------------------------------------------------------------------------------- /tests/cli-mysql-2-proxysql-failover.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source ./setup-etcd.sh 4 | 5 | echo "Creating servers..." 
6 | docker compose down 7 | docker rm -f mm 8 | 9 | docker compose up -d 10 | setup_user 11 | docker build ./../ -t mysql-manager:latest 12 | docker run -d \ 13 | -v ./config/mm-config-mysql-2.yaml:/etc/mm/cluster-spec.yaml \ 14 | --network mysql-manager_default --name mm \ 15 | -e ETCD_HOST=etcd -e ETCD_USERNAME=mm -e ETCD_PASSWORD=password -e ETCD_PREFIX=mm/cluster1/ \ 16 | -p 8000:8000 mysql-manager:latest 17 | docker exec mm python cli/mysql-cli.py init -f /etc/mm/cluster-spec.yaml 18 | sleep 30 19 | 20 | echo -e "\n\nCreating db through proxysql..." 21 | docker compose exec mysql-s1 mysql -uhamadmin -ppassword -h proxysql -e "use hamdb; CREATE TABLE t1 (c1 INT PRIMARY KEY, c2 TEXT NOT NULL);INSERT INTO t1 VALUES (1, 'Luis');" 22 | sleep 5 23 | 24 | echo -e "\n\nTesting failover..." 25 | docker compose stop mysql-s1 26 | sleep 20 27 | docker compose exec mysql-s2 mysql -uroot -proot -e "show replica status\G" 28 | sleep 5 29 | docker compose exec mysql-s2 mysql -uroot -proot -e "show master status" 30 | sleep 5 31 | 32 | docker compose exec mysql-s2 mysql -uhamadmin -ppassword -h proxysql -e "select * from hamdb.t1;" 33 | 34 | echo -e "\n\nChecking events in master..." 35 | docker compose exec mysql-s2 mysql -uroot -proot -e "USE mysql; SHOW EVENTS;" 36 | sleep 5 37 | echo -e "\n\nChecking default user..." 38 | docker compose exec mysql-s2 mysql -uroot -proot -e "SELECT user FROM mysql.user" 39 | docker compose exec mysql-s2 mysql -uroot -proot -e "show grants for hamadmin" 40 | sleep 5 41 | 42 | echo -e "\n\nChecking default database..." 43 | docker compose exec mysql-s2 mysql -uroot -proot -e "show databases" 44 | sleep 5 45 | 46 | echo -e "\n\nChecking proxysql config and stats..." 
47 | docker compose exec mysql-s2 mysql -uradmin -ppwd -h proxysql -P6032 -e "select * from runtime_mysql_servers;" 48 | docker compose exec mysql-s2 mysql -uradmin -ppwd -h proxysql -P6032 -e "SELECT * FROM monitor.mysql_server_connect_log ORDER BY time_start_us DESC LIMIT 6" 49 | docker compose exec mysql-s2 mysql -uradmin -ppwd -h proxysql -P6032 -e "select Queries, srv_host from stats_mysql_connection_pool\G" 50 | docker compose exec mysql-s2 mysql -uradmin -ppwd -h proxysql -P6032 -e "select * from stats_mysql_query_rules" 51 | sleep 5 52 | 53 | echo -e "\n\nChecking metrics from exporter..." 54 | curl localhost:9104/metrics | grep mysql_up 55 | sleep 5 56 | 57 | echo -e "\n\nTesting cluster status..." 58 | echo -e "\n[Case 1]: up, down" 59 | docker exec mm python /app/cli/mysql-cli.py mysql get-cluster-status 60 | sleep 5 61 | 62 | 63 | 64 | echo -e "\n\nStarting old master..." 65 | docker compose up -d mysql-s1 66 | sleep 20 67 | docker logs mm --tail 20 68 | sleep 5 69 | docker compose exec mysql-s2 mysql -uhamadmin -ppassword -h proxysql -e "use hamdb; INSERT INTO t1 VALUES (2, 'Jackie');" 70 | sleep 5 71 | docker compose exec mysql-s2 mysql -uroot -proot -e "show replica status\G" 72 | sleep 5 73 | docker compose exec mysql-s2 mysql -uroot -proot -e "show master status" 74 | sleep 5 75 | docker compose exec mysql-s1 mysql -uroot -proot -e "show replica status\G" 76 | sleep 5 77 | docker compose exec mysql-s1 mysql -uroot -proot -e "show master status" 78 | sleep 5 79 | docker compose exec mysql-s2 mysql -uradmin -ppwd -h proxysql -P6032 -e "select * from runtime_mysql_servers;" 80 | sleep 5 81 | curl localhost:9104/metrics | grep mysql_up 82 | curl localhost:9105/metrics | grep mysql_up 83 | sleep 5 84 | 85 | echo -e "\n\nTesting mysql manager restart..." 
86 | docker rm -f mm 87 | docker run -d \ 88 | -v ./config/mm-config-mysql-2.yaml:/etc/mm/cluster-spec.yaml \ 89 | --network mysql-manager_default --name mm \ 90 | -e ETCD_HOST=etcd -e ETCD_USERNAME=mm -e ETCD_PASSWORD=password -e ETCD_PREFIX=mm/cluster1/ \ 91 | -p 8000:8000 mysql-manager:latest 92 | sleep 20 93 | docker compose exec mysql-s2 mysql -uroot -proot -e "show replica status\G" 94 | sleep 5 95 | docker compose exec mysql-s2 mysql -uroot -proot -e "show master status" 96 | sleep 5 97 | docker compose exec mysql-s1 mysql -uroot -proot -e "show replica status\G" 98 | sleep 5 99 | docker compose exec mysql-s1 mysql -uroot -proot -e "show master status" 100 | sleep 5 101 | docker compose exec mysql-s2 mysql -uradmin -ppwd -h proxysql -P6032 -e "select * from runtime_mysql_servers;" 102 | sleep 5 103 | 104 | echo -e "\n\nTesting cluster status..." 105 | echo -e "\n[Case 1]: up, up" 106 | docker exec mm python /app/cli/mysql-cli.py mysql get-cluster-status 107 | sleep 5 108 | 109 | echo -e "\n\nDestroying servers..." 110 | docker compose down 111 | docker rm -f mm 112 | -------------------------------------------------------------------------------- /tests/cli-mysql-2-proxysql.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | echo -e "\U1F6A7 Creating servers..." 4 | source ./setup-etcd.sh 5 | 6 | echo "Creating servers..." 7 | docker compose down 8 | docker rm -f mm 9 | 10 | docker compose up -d 11 | setup_user 12 | docker build ./../ -t mysql-manager:latest 13 | docker run -d \ 14 | -v ./config/mm-config-mysql-2.yaml:/etc/mm/cluster-spec.yaml \ 15 | --network mysql-manager_default --name mm \ 16 | -e ETCD_HOST=etcd -e ETCD_USERNAME=mm -e ETCD_PASSWORD=password -e ETCD_PREFIX=mm/cluster1/ \ 17 | -p 8000:8000 mysql-manager:latest 18 | docker exec mm python cli/mysql-cli.py init -f /etc/mm/cluster-spec.yaml 19 | sleep 30 20 | 21 | 22 | echo -e "\n\n\U1F4BB Creating db in master..." 
23 | docker compose exec mysql-s1 mysql -uhamadmin -ppassword -h proxysql -e "use hamdb; CREATE TABLE t1 (c1 INT PRIMARY KEY, c2 TEXT NOT NULL);INSERT INTO t1 VALUES (1, 'Luis');" 24 | sleep 5 25 | 26 | echo -e "\n\n\U1F4BB Checking through proxysql..." 27 | docker compose exec mysql-s1 mysql -uhamadmin -ppassword -h proxysql -e "select * from hamdb.t1;" 28 | sleep 5 29 | 30 | echo -e "\n\n\U270c Checking master..." 31 | docker compose exec mysql-s1 mysql -uroot -proot -e "select * from hamdb.t1;" 32 | sleep 5 33 | 34 | echo -e "\n\n\U270c Checking replica..." 35 | docker compose exec mysql-s2 mysql -uroot -proot -e "select * from hamdb.t1;" 36 | sleep 5 37 | 38 | echo -e "\n\n\U270c Checking events in master..." 39 | docker compose exec mysql-s1 mysql -uroot -proot -e "USE mysql; SHOW EVENTS;" 40 | sleep 5 41 | 42 | echo -e "\n\n\U270c Checking default user..." 43 | docker compose exec mysql-s1 mysql -uroot -proot -e "SELECT user FROM mysql.user" 44 | docker compose exec mysql-s1 mysql -uroot -proot -e "show grants for hamadmin" 45 | sleep 5 46 | 47 | echo -e "\n\n\U270c Checking default database..." 48 | docker compose exec mysql-s1 mysql -uroot -proot -e "show databases" 49 | sleep 5 50 | 51 | echo -e "\n\n\U1F6B6 Checking proxysql config and stats..." 52 | sleep 10 53 | docker compose exec mysql-s1 mysql -uradmin -ppwd -h proxysql -P6032 -e "select * from runtime_mysql_servers;" 54 | docker compose exec mysql-s1 mysql -uradmin -ppwd -h proxysql -P6032 -e "SELECT * FROM monitor.mysql_server_connect_log ORDER BY time_start_us DESC LIMIT 6" 55 | docker compose exec mysql-s1 mysql -uradmin -ppwd -h proxysql -P6032 -e "select Queries, srv_host from stats_mysql_connection_pool\G" 56 | docker compose exec mysql-s1 mysql -uradmin -ppwd -h proxysql -P6032 -e "select * from stats_mysql_query_rules" 57 | sleep 5 58 | 59 | echo -e "\n\n\U1F6B6 Checking metrics from exporter..." 
60 | curl localhost:9104/metrics | grep mysql_up 61 | 62 | 63 | echo -e "\n\n\U1F6B6 Test persisted variables..." 64 | docker compose restart mysql-s2 65 | sleep 20 66 | docker compose exec mysql-s2 mysql -uroot -proot -e "select @@global.super_read_only" 67 | docker compose exec mysql-s2 mysql -uroot -proot -e "select @@global.read_only" 68 | 69 | echo -e "\n\n\U1F6B6 Testing add replica..." 70 | docker compose exec mm python /app/cli/mysql-cli.py mysql add-replica 71 | 72 | echo -e "\n\n\U1F6B6 Testing cluster status..." 73 | echo -e "\n[Case 1]: up, up" 74 | docker exec mm python /app/cli/mysql-cli.py mysql get-cluster-status 75 | 76 | echo -e "\n[Case 2]: up, replication_threads_stopped" 77 | docker compose exec mysql-s2 mysql -uroot -proot -e "stop replica io_thread" 78 | sleep 5 79 | docker exec mm python /app/cli/mysql-cli.py mysql get-cluster-status 80 | 81 | echo -e "\n[Case 3]: up, replication_threads_stopped" 82 | docker compose exec mysql-s2 mysql -uroot -proot -e "start replica io_thread" 83 | docker compose exec mysql-s2 mysql -uroot -proot -e "stop replica sql_thread" 84 | sleep 5 85 | docker exec mm python /app/cli/mysql-cli.py mysql get-cluster-status 86 | 87 | echo -e "\n[Case 4]: up, down" 88 | docker compose stop mysql-s2 89 | sleep 5 90 | docker exec mm python /app/cli/mysql-cli.py mysql get-cluster-status 91 | sleep 5 92 | docker compose start mysql-s2 93 | sleep 30 94 | 95 | echo -e "\n\nTesting mysql manager restart..." 
96 | docker rm -f mm 97 | docker run -d \ 98 | -v ./config/mm-config-mysql-2.yaml:/etc/mm/cluster-spec.yaml \ 99 | --network mysql-manager_default --name mm \ 100 | -e ETCD_HOST=etcd -e ETCD_USERNAME=mm -e ETCD_PASSWORD=password -e ETCD_PREFIX=mm/cluster1/ \ 101 | -p 8000:8000 mysql-manager:latest 102 | sleep 30 103 | docker logs mm --tail 40 104 | sleep 5 105 | docker compose exec mysql-s2 mysql -uroot -proot -e "show replica status\G" 106 | sleep 5 107 | docker compose exec mysql-s2 mysql -uroot -proot -e "show master status" 108 | sleep 5 109 | docker compose exec mysql-s1 mysql -uroot -proot -e "show replica status\G" 110 | sleep 5 111 | docker compose exec mysql-s1 mysql -uroot -proot -e "show master status" 112 | sleep 5 113 | docker compose exec mysql-s2 mysql -uradmin -ppwd -h proxysql -P6032 -e "select * from runtime_mysql_servers;" 114 | sleep 5 115 | 116 | echo -e "\n\nTesting cluster status..." 117 | echo -e "\n[Case 1]: up, up" 118 | docker exec mm python /app/cli/mysql-cli.py mysql get-cluster-status 119 | sleep 5 120 | 121 | echo -e "\n\n\U1F6A7 Destroying servers..." 
122 | sleep 5 123 | docker compose down 124 | docker rm -f mm 125 | -------------------------------------------------------------------------------- /tests/config/mm-config-mysql-1.yaml: -------------------------------------------------------------------------------- 1 | mysqls: 2 | s1: 3 | host: mysql-s1 4 | user: root 5 | password: root 6 | users: 7 | replPassword: password 8 | exporterPassword: exporter 9 | nonprivPassword: password 10 | nonprivUser: hamadmin 11 | proxysqlMonPassword: password 12 | 13 | -------------------------------------------------------------------------------- /tests/config/mm-config-mysql-2-migrate.yaml: -------------------------------------------------------------------------------- 1 | mysqls: 2 | s1: 3 | host: mysql-s1 4 | user: root 5 | password: root 6 | s2: 7 | host: mysql-s2 8 | user: root 9 | password: root 10 | remote: 11 | host: 12 | user: 13 | password: 14 | port: 15 | users: 16 | replPassword: password 17 | exporterPassword: exporter 18 | nonprivPassword: password 19 | nonprivUser: hamadmin 20 | proxysqlMonPassword: password 21 | 22 | -------------------------------------------------------------------------------- /tests/config/mm-config-mysql-2.yaml: -------------------------------------------------------------------------------- 1 | mysqls: 2 | s1: 3 | host: mysql-s1 4 | user: root 5 | password: root 6 | s2: 7 | host: mysql-s2 8 | user: root 9 | password: root 10 | users: 11 | replPassword: password 12 | exporterPassword: exporter 13 | nonprivPassword: password 14 | nonprivUser: hamadmin 15 | proxysqlMonPassword: password 16 | 17 | -------------------------------------------------------------------------------- /tests/config/mysql-exporter-s1.cnf: -------------------------------------------------------------------------------- 1 | [client] 2 | host = mysql-s1 3 | user = exporter 4 | password = exporter 5 | -------------------------------------------------------------------------------- /tests/config/mysql-exporter-s2.cnf: 
-------------------------------------------------------------------------------- 1 | [client] 2 | host = mysql-s2 3 | user = exporter 4 | password = exporter 5 | -------------------------------------------------------------------------------- /tests/config/mysql-s1.cnf: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | server-id = 1 3 | gtid-mode = ON 4 | enforce-gtid-consistency = ON 5 | log-bin = binlog 6 | relay-log = relaylog 7 | datadir = /var/lib/mysql 8 | binlog_expire_logs_seconds = 259200 9 | binlog_expire_logs_auto_purge = ON 10 | max_binlog_size = 104857600 11 | slow_query_log = 1 12 | long_query_time = 1 13 | slow_query_log_file = /var/lib/mysql/slow.log 14 | max_connections = 1000 -------------------------------------------------------------------------------- /tests/config/mysql-s2.cnf: -------------------------------------------------------------------------------- 1 | [mysqld] 2 | server-id = 2 3 | gtid-mode = ON 4 | enforce-gtid-consistency = ON 5 | log-bin = binlog 6 | relay-log = relaylog 7 | datadir = /var/lib/mysql 8 | binlog_expire_logs_seconds = 259200 9 | binlog_expire_logs_auto_purge = ON 10 | max_binlog_size = 104857600 11 | slow_query_log = 1 12 | long_query_time = 1 13 | slow_query_log_file = /var/lib/mysql/slow.log 14 | max_connections = 1000 -------------------------------------------------------------------------------- /tests/config/prometheus.yaml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 10s 3 | evaluation_interval: 10s 4 | rule_files: 5 | - /etc/prometheus/rules.yaml 6 | scrape_configs: 7 | - job_name: "mysql-s2" 8 | static_configs: 9 | - targets: ["mysql-exporter-s2:9104"] 10 | -------------------------------------------------------------------------------- /tests/config/proxysql.cnf: -------------------------------------------------------------------------------- 1 | datadir="/var/lib/proxysql" 2 | 
admin_variables= 3 | { 4 | admin_credentials="admin:pwd;radmin:pwd" 5 | mysql_ifaces="0.0.0.0:6032" 6 | restapi_enabled=true 7 | restapi_port=6070 8 | } 9 | mysql_variables= 10 | { 11 | threads=4 12 | max_connections=2048 13 | default_query_delay=0 14 | default_query_timeout=36000000 15 | have_compress=true 16 | poll_timeout=2000 17 | interfaces="0.0.0.0:3306" 18 | default_schema="information_schema" 19 | stacksize=1048576 20 | server_version="5.5.30" 21 | connect_timeout_server=3000 22 | monitor_history=600000 23 | monitor_connect_interval=60000 24 | monitor_ping_interval=10000 25 | monitor_read_only_interval=1500 26 | monitor_read_only_timeout=500 27 | ping_interval_server_msec=120000 28 | ping_timeout_server=500 29 | commands_stats=true 30 | sessions_sort=true 31 | connect_retries_on_failure=10 32 | } -------------------------------------------------------------------------------- /tests/config/rules.yaml: -------------------------------------------------------------------------------- 1 | groups: 2 | - name: mysql 3 | rules: 4 | - alert: mysql:replication_sql:problem 5 | expr: mysql_slave_status_slave_sql_running == 0 6 | for: 1m 7 | labels: 8 | severity: critical 9 | db_type: mysql 10 | annotations: 11 | summary: replication is down 12 | description: replication is down 13 | -------------------------------------------------------------------------------- /tests/features/environment.py: -------------------------------------------------------------------------------- 1 | import os 2 | from testcontainers.core.image import DockerImage 3 | from tests.integration_test.environment.test_environment_factory import TestEnvironmentFactory 4 | 5 | def before_all(context): 6 | context.mysql_manager_image = os.getenv("MYSQL_MANAGER_IMAGE", "mysql-manager:latest") 7 | context.haproxy_image = os.getenv("HAPROXY_IMAGE", "mm-haproxy:latest") 8 | context.mysql_image = os.getenv("MYSQL_IMAGE", "hub.hamdocker.ir/library/mysql:8.0.35-bullseye") 9 | if os.getenv("BUILD_IMAGE", 
"false") == "true": 10 | DockerImage(path=".", tag=context.mysql_manager_image).build() 11 | 12 | def before_scenario(context, scenario): 13 | context.test_env = TestEnvironmentFactory() 14 | 15 | def after_scenario(context, scenario): 16 | context.test_env.stop() 17 | -------------------------------------------------------------------------------- /tests/features/failover.feature: -------------------------------------------------------------------------------- 1 | Feature: test failover 2 | setup 2 nodes, kill master and check failover, after that restart previous master and it should join the cluster as a replica node 3 | 4 | ## TODO: check read only values on servers 5 | Scenario: start first mysql and add second replica 6 | Given setup etcd with name etcd and image: quay.hamdocker.ir/coreos/etcd:v3.5.9-amd64 7 | And setup user root with password: password for etcd 8 | And setup user mm for etcd with password: password access to path mm/cluster1/ 9 | And setup default mysql with server_id 1 10 | And setup default mysql with server_id 2 11 | And setup mysql_manager with name mm with env ETCD_HOST=etcd ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 12 | And setup haproxy with name hap1 with env ETCD_HOST=http://etcd:2379 ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 13 | And setup haproxy with name hap2 with env ETCD_HOST=http://etcd:2379 ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 14 | And init mysql cluster spec 15 | And sleep 30 seconds 16 | When execute mysql query with user: hamadmin, password: password, host: hap1 and port: 3306 query: use hamdb; CREATE TABLE t1 (c1 INT PRIMARY KEY, c2 TEXT NOT NULL);INSERT INTO t1 VALUES (1, 'Luis'); 17 | Given stop mysql with server_id 1 18 | And sleep 30 seconds 19 | Then cluster status must be 20 | """ 21 | source=up 22 | replica=down 23 | 24 | """ 25 | Then result of query: "select @@global.server_id;" with user: hamadmin and password: password on host: hap1 and 
port: 3306 should be 26 | """ 27 | 28 | 29 | 30 | 31 | 2 32 | 33 | 34 | """ 35 | Then result of query: "select @@global.server_id;" with user: hamadmin and password: password on host: hap2 and port: 3306 should be 36 | """ 37 | 38 | 39 | 40 | 41 | 2 42 | 43 | 44 | """ 45 | Then result of query: "show replica status;" with user: root and password: root on host: mysql-s2 and port: 3306 should be 46 | """ 47 | 48 | 49 | 50 | """ 51 | 52 | # Then result of query: "show master status;" with user: root and password: root on host: mysql-s2 and port: 3306 should be 53 | # """ 54 | # """ 55 | 56 | Then result of query: "select * from hamdb.t1;" with user: hamadmin and password: password on host: hap1 and port: 3306 should be 57 | """ 58 | 59 | 60 | 61 | 62 | 1 63 | Luis 64 | 65 | 66 | """ 67 | 68 | Then result of query: "select * from hamdb.t1;" with user: hamadmin and password: password on host: hap2 and port: 3306 should be 69 | """ 70 | 71 | 72 | 73 | 74 | 1 75 | Luis 76 | 77 | 78 | """ 79 | 80 | Then result of query: "SELECT user FROM mysql.user;" with user: root and password: root on host: mysql-s2 and port: 3306 should be 81 | """ 82 | 83 | 84 | 85 | 86 | exporter 87 | 88 | 89 | 90 | hamadmin 91 | 92 | 93 | 94 | replica 95 | 96 | 97 | 98 | root 99 | 100 | 101 | 102 | mysql.infoschema 103 | 104 | 105 | 106 | mysql.session 107 | 108 | 109 | 110 | mysql.sys 111 | 112 | 113 | 114 | root 115 | 116 | 117 | """ 118 | 119 | Then result of query: "show grants for hamadmin;" with user: root and password: root on host: mysql-s2 and port: 3306 should be 120 | """ 121 | 122 | 123 | 124 | 125 | GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, SHUTDOWN, PROCESS, FILE, REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER, CREATE TABLESPACE, CREATE ROLE, DROP ROLE ON *.* TO `hamadmin`@`%` WITH GRANT OPTION 126 | 127 | 
128 | 129 | GRANT APPLICATION_PASSWORD_ADMIN,AUDIT_ABORT_EXEMPT,AUDIT_ADMIN,AUTHENTICATION_POLICY_ADMIN,BACKUP_ADMIN,BINLOG_ADMIN,BINLOG_ENCRYPTION_ADMIN,CLONE_ADMIN,ENCRYPTION_KEY_ADMIN,FIREWALL_EXEMPT,FLUSH_OPTIMIZER_COSTS,FLUSH_STATUS,FLUSH_TABLES,FLUSH_USER_RESOURCES,GROUP_REPLICATION_ADMIN,GROUP_REPLICATION_STREAM,INNODB_REDO_LOG_ARCHIVE,INNODB_REDO_LOG_ENABLE,PASSWORDLESS_USER_ADMIN,PERSIST_RO_VARIABLES_ADMIN,REPLICATION_APPLIER,RESOURCE_GROUP_ADMIN,RESOURCE_GROUP_USER,ROLE_ADMIN,SENSITIVE_VARIABLES_OBSERVER,SERVICE_CONNECTION_ADMIN,SESSION_VARIABLES_ADMIN,SET_USER_ID,SHOW_ROUTINE,SYSTEM_USER,SYSTEM_VARIABLES_ADMIN,TABLE_ENCRYPTION_ADMIN,TELEMETRY_LOG_ADMIN,XA_RECOVER_ADMIN ON *.* TO `hamadmin`@`%` WITH GRANT OPTION 130 | 131 | 132 | """ 133 | 134 | Then result of query: "show databases;" with user: root and password: root on host: mysql-s2 and port: 3306 should be 135 | """ 136 | 137 | 138 | 139 | 140 | hamdb 141 | 142 | 143 | 144 | information_schema 145 | 146 | 147 | 148 | mysql 149 | 150 | 151 | 152 | performance_schema 153 | 154 | 155 | 156 | sys 157 | 158 | 159 | 160 | test 161 | 162 | 163 | """ 164 | 165 | Given start mysql with server_id 1 166 | Given sleep 30 seconds 167 | Then cluster status must be 168 | """ 169 | source=up 170 | replica=up 171 | 172 | """ 173 | Then result of query: "select @@global.server_id;" with user: hamadmin and password: password on host: hap1 and port: 3306 should be 174 | """ 175 | 176 | 177 | 178 | 179 | 2 180 | 181 | 182 | """ 183 | Then result of query: "select @@global.server_id;" with user: hamadmin and password: password on host: hap2 and port: 3306 should be 184 | """ 185 | 186 | 187 | 188 | 189 | 2 190 | 191 | 192 | """ 193 | Then result of query: "select @@global.server_id;" with user: hamadmin and password: password on host: hap1 and port: 3307 should be 194 | """ 195 | 196 | 197 | 198 | 199 | 1 200 | 201 | 202 | """ 203 | Then result of query: "select @@global.server_id;" with user: hamadmin and password: 
password on host: hap2 and port: 3307 should be 204 | """ 205 | 206 | 207 | 208 | 209 | 1 210 | 211 | 212 | """ 213 | When execute mysql query with user: hamadmin, password: password, host: hap2 and port: 3306 query: INSERT INTO hamdb.t1 VALUES (2, 'Hassan'); 214 | Then result of query: "show replica status;" with user: root and password: root on host: mysql-s2 and port: 3306 should be 215 | """ 216 | 217 | 218 | 219 | """ 220 | # Then result of query: "show master status;" with user: root and password: root on host: mysql-s2 and port: 3306 should be 221 | # """ 222 | # """ 223 | 224 | # Then result of query: "show replica status;" with user: root and password: root on host: mysql-s1 and port: 3306 should be 225 | # """ 226 | # """ 227 | 228 | # Then result of query: "show master status;" with user: root and password: root on host: mysql-s1 and port: 3306 should be 229 | # """ 230 | # """ 231 | 232 | Then result of query: "select * from hamdb.t1;" with user: hamadmin and password: password on host: hap1 and port: 3306 should be 233 | """ 234 | 235 | 236 | 237 | 238 | 1 239 | Luis 240 | 241 | 242 | 2 243 | Hassan 244 | 245 | 246 | """ 247 | 248 | Then result of query: "select * from hamdb.t1;" with user: hamadmin and password: password on host: hap2 and port: 3306 should be 249 | """ 250 | 251 | 252 | 253 | 254 | 1 255 | Luis 256 | 257 | 258 | 2 259 | Hassan 260 | 261 | 262 | """ 263 | 264 | Then result of query: "select * from hamdb.t1;" with user: root and password: root on host: mysql-s1 and port: 3306 should be 265 | """ 266 | 267 | 268 | 269 | 270 | 1 271 | Luis 272 | 273 | 274 | 2 275 | Hassan 276 | 277 | 278 | """ 279 | 280 | Then result of query: "select * from hamdb.t1;" with user: root and password: root on host: mysql-s2 and port: 3306 should be 281 | """ 282 | 283 | 284 | 285 | 286 | 1 287 | Luis 288 | 289 | 290 | 2 291 | Hassan 292 | 293 | 294 | """ 295 | 296 | Given restart mysql manager with env ETCD_HOST=etcd ETCD_USERNAME=mm ETCD_PASSWORD=password 
ETCD_PREFIX=mm/cluster1/ 297 | And sleep 20 seconds 298 | Then cluster status must be 299 | """ 300 | source=up 301 | replica=up 302 | 303 | """ 304 | Then result of query: "show replica status;" with user: root and password: root on host: mysql-s2 and port: 3306 should be 305 | """ 306 | 307 | 308 | 309 | """ 310 | # Then result of query: "show master status;" with user: root and password: root on host: mysql-s2 and port: 3306 should be 311 | # """ 312 | # """ 313 | 314 | # Then result of query: "show replica status;" with user: root and password: root on host: mysql-s1 and port: 3306 should be 315 | # """ 316 | # """ 317 | 318 | # Then result of query: "show master status;" with user: root and password: root on host: mysql-s1 and port: 3306 should be 319 | # """ 320 | # """ 321 | 322 | 323 | Scenario: increase the fail interval time and then do a failover 324 | Given setup etcd with name etcd and image: quay.hamdocker.ir/coreos/etcd:v3.5.9-amd64 325 | And setup user root with password: password for etcd 326 | And setup user mm for etcd with password: password access to path mm/cluster1/ 327 | And setup default mysql with server_id 1 328 | And setup default mysql with server_id 2 329 | And setup mysql_manager with name mm with env ETCD_HOST=etcd ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 330 | And init mysql cluster spec 331 | And change fail interval to 60 seconds 332 | And sleep 30 seconds 333 | """ 334 | source=up 335 | replica=up 336 | 337 | """ 338 | And stop mysql with server_id 1 339 | And sleep 45 seconds 340 | Then cluster status must be 341 | """ 342 | source=down 343 | replica=replication_threads_stopped 344 | 345 | """ 346 | Given sleep 35 seconds 347 | Then cluster status must be 348 | """ 349 | source=up 350 | replica=down 351 | 352 | """ 353 | Given start mysql with server_id 1 354 | And restart mysql manager with env ETCD_HOST=etcd ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 355 | And sleep 40 seconds 356
| Then cluster status must be 357 | """ 358 | source=up 359 | replica=up 360 | 361 | """ 362 | -------------------------------------------------------------------------------- /tests/features/idempotency.feature: -------------------------------------------------------------------------------- 1 | ## proxysql rules 2 | ## mysql event 3 | -------------------------------------------------------------------------------- /tests/features/mysql-add-replica.feature: -------------------------------------------------------------------------------- 1 | Feature: add replica to cluster 2 | add one replica to current cluster and check its state 3 | 4 | Scenario: start first mysql and add second replica 5 | Given setup etcd with name etcd and image: quay.hamdocker.ir/coreos/etcd:v3.5.9-amd64 6 | And setup user root with password: password for etcd 7 | And setup user mm for etcd with password: password access to path mm/cluster1/ 8 | And setup default mysql with server_id 1 9 | And setup mysql_manager with name mm with env ETCD_HOST=etcd ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 10 | And init mysql cluster spec 11 | And setup haproxy with name hap1 with env ETCD_HOST=http://etcd:2379 ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 12 | And setup haproxy with name hap2 with env ETCD_HOST=http://etcd:2379 ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 13 | And sleep 30 seconds 14 | Then cluster status must be 15 | """ 16 | source=up 17 | replica=down 18 | 19 | """ 20 | When execute mysql query with user: hamadmin, password: password, host: hap1 and port: 3306 query: use hamdb; CREATE TABLE t1 (c1 INT PRIMARY KEY, c2 TEXT NOT NULL);INSERT INTO t1 VALUES (1, 'Luis'); 21 | And execute mysql query with user: root, password: root, host: mysql-s1 and port: 3306 query: flush binary logs; 22 | And execute mysql query with user: root, password: root, host: mysql-s1 and port: 3306 query: purge binary logs before now(); 23 | And execute 
mysql query with user: hamadmin, password: password, host: hap2 and port: 3306 query: use hamdb; INSERT INTO hamdb.t1 VALUES (2, 'Hassan'); 24 | And execute mysql query with user: root, password: root, host: mysql-s1 and port: 3306 query: flush binary logs; 25 | Given sleep 30 seconds 26 | Then result of query: "select * from hamdb.t1;" with user: hamadmin and password: password on host: hap1 and port: 3306 should be 27 | """ 28 | 29 | 30 | 31 | 32 | 1 33 | Luis 34 | 35 | 36 | 2 37 | Hassan 38 | 39 | 40 | """ 41 | 42 | Then result of query: "select * from hamdb.t1;" with user: root and password: root on host: mysql-s1 and port: 3306 should be 43 | """ 44 | 45 | 46 | 47 | 48 | 1 49 | Luis 50 | 51 | 52 | 2 53 | Hassan 54 | 55 | 56 | """ 57 | Given setup default mysql with server_id 2 58 | And add mysql to cluster with host: mysql-s2 and name: s2 and user: root and password: root 59 | And sleep 50 seconds 60 | Then cluster status must be 61 | """ 62 | source=up 63 | replica=up 64 | 65 | """ 66 | Then result of query: "select @@global.server_id;" with user: hamadmin and password: password on host: hap1 and port: 3306 should be 67 | """ 68 | 69 | 70 | 71 | 72 | 1 73 | 74 | 75 | """ 76 | Then result of query: "select @@global.server_id;" with user: hamadmin and password: password on host: hap2 and port: 3306 should be 77 | """ 78 | 79 | 80 | 81 | 82 | 1 83 | 84 | 85 | """ 86 | Then result of query: "select @@global.server_id;" with user: hamadmin and password: password on host: hap1 and port: 3307 should be 87 | """ 88 | 89 | 90 | 91 | 92 | 2 93 | 94 | 95 | """ 96 | Then result of query: "select @@global.server_id;" with user: hamadmin and password: password on host: hap2 and port: 3307 should be 97 | """ 98 | 99 | 100 | 101 | 102 | 2 103 | 104 | 105 | """ 106 | Then result of query: "select * from hamdb.t1;" with user: root and password: root on host: mysql-s2 and port: 3306 should be 107 | """ 108 | 109 | 110 | 111 | 112 | 1 113 | Luis 114 | 115 | 116 | 2 117 | Hassan 
118 | 119 | 120 | """ 121 | 122 | Scenario: start with two mysqls and add source again 123 | Given setup etcd with name etcd and image: quay.hamdocker.ir/coreos/etcd:v3.5.9-amd64 124 | And setup user root with password: password for etcd 125 | And setup user mm for etcd with password: password access to path mm/cluster1/ 126 | And setup default mysql with server_id 1 127 | And setup default mysql with server_id 2 128 | And setup mysql_manager with name mm with env ETCD_HOST=etcd ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 129 | And init mysql cluster spec 130 | And sleep 40 seconds 131 | Then cluster status must be 132 | """ 133 | source=up 134 | replica=up 135 | 136 | """ 137 | Given add mysql to cluster with host: mysql-s1 and name: s1 and user: root and password: root 138 | And restart mysql manager with env ETCD_HOST=etcd ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 139 | And sleep 10 seconds 140 | 141 | Then cluster status must be 142 | """ 143 | source=up 144 | replica=up 145 | 146 | """ 147 | 148 | -------------------------------------------------------------------------------- /tests/features/one-mysql-and-haproxy.feature: -------------------------------------------------------------------------------- 1 | Feature: one-mysql-and-two-haproxies 2 | Setup one mysql and two haproxies with mm 3 | 4 | Scenario: check start one mysql with two haproxies 5 | Given setup etcd with name etcd and image: quay.hamdocker.ir/coreos/etcd:v3.5.9-amd64 6 | And setup user root with password: password for etcd 7 | And setup user mm for etcd with password: password access to path mm/cluster1/ 8 | And setup default mysql with server_id 1 9 | And setup mysql_manager with name mm with env ETCD_HOST=etcd ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 10 | And setup haproxy with name hap1 with env ETCD_HOST=http://etcd:2379 ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 11 | And setup haproxy with name hap2 
with env ETCD_HOST=http://etcd:2379 ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 12 | And init mysql cluster spec 13 | And sleep 15 seconds 14 | Then cluster status must be 15 | """ 16 | source=up 17 | replica=down 18 | 19 | """ 20 | When execute mysql query with user: hamadmin, password: password, host: hap1 and port: 3306 query: use hamdb; CREATE TABLE t1 (c1 INT PRIMARY KEY, c2 TEXT NOT NULL);INSERT INTO t1 VALUES (1, 'Luis'); 21 | Then result of query: "select * from hamdb.t1;" with user: hamadmin and password: password on host: hap1 and port: 3306 should be 22 | """ 23 | 24 | 25 | 26 | 27 | 1 28 | Luis 29 | 30 | 31 | """ 32 | Then result of query: "select @@global.server_id;" with user: hamadmin and password: password on host: hap1 and port: 3306 should be 33 | """ 34 | 35 | 36 | 37 | 38 | 1 39 | 40 | 41 | """ 42 | Then result of query: "select @@global.server_id;" with user: hamadmin and password: password on host: hap2 and port: 3306 should be 43 | """ 44 | 45 | 46 | 47 | 48 | 1 49 | 50 | 51 | """ 52 | Then result of query: "select * from hamdb.t1;" with user: hamadmin and password: password on host: hap2 and port: 3306 should be 53 | """ 54 | 55 | 56 | 57 | 58 | 1 59 | Luis 60 | 61 | 62 | """ 63 | 64 | Then result of query: "select * from hamdb.t1;" with user: root and password: root on host: mysql-s1 and port: 3306 should be 65 | """ 66 | 67 | 68 | 69 | 70 | 1 71 | Luis 72 | 73 | 74 | """ 75 | 76 | # Then result of query: "USE mysql; SHOW EVENTS;" with user: root and password: root on host: mysql-s1 and port: 3306 should be 77 | # """ 78 | # """ 79 | 80 | Then result of query: "SELECT user FROM mysql.user;" with user: root and password: root on host: mysql-s1 and port: 3306 should be 81 | """ 82 | 83 | 84 | 85 | 86 | exporter 87 | 88 | 89 | 90 | hamadmin 91 | 92 | 93 | 94 | replica 95 | 96 | 97 | 98 | root 99 | 100 | 101 | 102 | mysql.infoschema 103 | 104 | 105 | 106 | mysql.session 107 | 108 | 109 | 110 | mysql.sys 111 | 112 | 113 | 
114 | root 115 | 116 | 117 | """ 118 | 119 | Then result of query: "show grants for hamadmin;" with user: root and password: root on host: mysql-s1 and port: 3306 should be 120 | """ 121 | 122 | 123 | 124 | 125 | GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, SHUTDOWN, PROCESS, FILE, REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER, CREATE TABLESPACE, CREATE ROLE, DROP ROLE ON *.* TO `hamadmin`@`%` WITH GRANT OPTION 126 | 127 | 128 | 129 | GRANT APPLICATION_PASSWORD_ADMIN,AUDIT_ABORT_EXEMPT,AUDIT_ADMIN,AUTHENTICATION_POLICY_ADMIN,BACKUP_ADMIN,BINLOG_ADMIN,BINLOG_ENCRYPTION_ADMIN,CLONE_ADMIN,ENCRYPTION_KEY_ADMIN,FIREWALL_EXEMPT,FLUSH_OPTIMIZER_COSTS,FLUSH_STATUS,FLUSH_TABLES,FLUSH_USER_RESOURCES,GROUP_REPLICATION_ADMIN,GROUP_REPLICATION_STREAM,INNODB_REDO_LOG_ARCHIVE,INNODB_REDO_LOG_ENABLE,PASSWORDLESS_USER_ADMIN,PERSIST_RO_VARIABLES_ADMIN,REPLICATION_APPLIER,RESOURCE_GROUP_ADMIN,RESOURCE_GROUP_USER,ROLE_ADMIN,SENSITIVE_VARIABLES_OBSERVER,SERVICE_CONNECTION_ADMIN,SESSION_VARIABLES_ADMIN,SET_USER_ID,SHOW_ROUTINE,SYSTEM_USER,SYSTEM_VARIABLES_ADMIN,TABLE_ENCRYPTION_ADMIN,TELEMETRY_LOG_ADMIN,XA_RECOVER_ADMIN ON *.* TO `hamadmin`@`%` WITH GRANT OPTION 130 | 131 | 132 | """ 133 | 134 | Then result of query: "show databases;" with user: root and password: root on host: mysql-s1 and port: 3306 should be 135 | """ 136 | 137 | 138 | 139 | 140 | hamdb 141 | 142 | 143 | 144 | information_schema 145 | 146 | 147 | 148 | mysql 149 | 150 | 151 | 152 | performance_schema 153 | 154 | 155 | 156 | sys 157 | 158 | 159 | 160 | test 161 | 162 | 163 | """ 164 | -------------------------------------------------------------------------------- /tests/features/remove-replica.feature: -------------------------------------------------------------------------------- 1 | Feature: remove-replica 2 | Setup two mysqls and remove 
one of them 3 | 4 | Scenario: start 2 mysql and remove replica and source 5 | Given setup etcd with name etcd and image: quay.hamdocker.ir/coreos/etcd:v3.5.9-amd64 6 | And setup user root with password: password for etcd 7 | And setup user mm for etcd with password: password access to path mm/cluster1/ 8 | And setup default mysql with server_id 1 9 | And setup default mysql with server_id 2 10 | And setup mysql_manager with name mm with env ETCD_HOST=etcd ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 11 | And init mysql cluster spec 12 | And sleep 40 seconds 13 | Then cluster status must be 14 | """ 15 | source=up 16 | replica=up 17 | 18 | """ 19 | Given remove mysql with name: s1 20 | And sleep 10 seconds 21 | Then cluster status must be 22 | """ 23 | source=up 24 | replica=up 25 | 26 | """ 27 | 28 | Given remove mysql with name: s2 29 | And sleep 10 seconds 30 | Then cluster status must be 31 | """ 32 | source=up 33 | replica=down 34 | 35 | """ 36 | -------------------------------------------------------------------------------- /tests/features/steps/steps.py: -------------------------------------------------------------------------------- 1 | import time 2 | import xmltodict 3 | import logging 4 | from behave import * 5 | 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | @given('sleep {n:d} seconds') 10 | def sleep(context, n): 11 | time.sleep(n) 12 | 13 | @given('setup default proxysql with name: {name:w} and image: {image}') 14 | def start_default_proxysql(context, name, image): 15 | context.test_env.setup_proxysql( 16 | { 17 | "name": name, 18 | "image": image, 19 | "local_username": "admin", 20 | "local_password": "pwd", 21 | "remote_username": "radmin", 22 | "remote_password": "pwd" 23 | } 24 | ) 25 | 26 | @given('setup default mysql with server_id {server_id:d} and name {name}') 27 | def start_mysql_with_name(context, server_id, name): 28 | context.test_env.setup_mysql_with_name( 29 | {"server_id": server_id, "image": 
context.mysql_image}, 30 | name=name, 31 | ) 32 | 33 | @given('setup default mysql with config with server_id {server_id:d} and name {name}') 34 | def start_mysql_with_name_and_config(context, server_id, name): 35 | config = context.text 36 | context.test_env.setup_mysql_with_name( 37 | {"server_id": server_id, "image": context.mysql_image}, 38 | name=name, 39 | config=config 40 | ) 41 | 42 | @given('setup default mysql with server_id {server_id:d}') 43 | def start_mysql_with_image(context, server_id): 44 | context.test_env.setup_mysql( 45 | {"server_id": server_id, "image": context.mysql_image} 46 | ) 47 | 48 | @given('setup mysql with config with server_id {server_id:d}') 49 | def start_mysql_with_config(context, server_id): 50 | config = context.text 51 | context.test_env.setup_mysql( 52 | {"server_id": server_id, "image": context.mysql_image}, config=config 53 | ) 54 | 55 | @given('setup mysql_manager with name {name:w} with env ETCD_HOST={etcd_host:w} ETCD_USERNAME={etcd_username:w} ETCD_PASSWORD={etcd_password:w} ETCD_PREFIX={etcd_prefix}') 56 | def start_mysql_manager(context, name, etcd_host, etcd_username, etcd_password, etcd_prefix): 57 | envs = { 58 | "ETCD_HOST": etcd_host, 59 | "ETCD_USERNAME": etcd_username, 60 | "ETCD_PASSWORD": etcd_password, 61 | "ETCD_PREFIX": etcd_prefix 62 | } 63 | context.test_env.setup_mysql_manager( 64 | {"name": name, "image": context.mysql_manager_image, "envs": envs} 65 | ) 66 | 67 | 68 | @given("setup haproxy with name {name} with env ETCD_HOST={etcd_host} ETCD_USERNAME={etcd_username} ETCD_PASSWORD={etcd_password} ETCD_PREFIX={etcd_prefix}") 69 | def start_haproxy(context, name, etcd_host, etcd_username, etcd_password, etcd_prefix): 70 | envs = { 71 | "ETCD_HOST": etcd_host, 72 | "ETCD_USERNAME": etcd_username, 73 | "ETCD_PASSWORD": etcd_password, 74 | "ETCD_PREFIX": etcd_prefix 75 | } 76 | context.test_env.setup_haproxy( 77 | {"name": name, "image": context.haproxy_image, "envs": envs} 78 | ) 79 | 80 | 81 | 
@given('setup mysql_manager with remote({rhost}, {ruser}, {rpassword}, {rport:d}) with name {name:w} with env ETCD_HOST={etcd_host:w} ETCD_USERNAME={etcd_username:w} ETCD_PASSWORD={etcd_password:w} ETCD_PREFIX={etcd_prefix}') 82 | def start_mysql_manager_with_remote(context, rhost, ruser, rpassword, rport, name, etcd_host, etcd_username, etcd_password, etcd_prefix): 83 | envs = { 84 | "ETCD_HOST": etcd_host, 85 | "ETCD_USERNAME": etcd_username, 86 | "ETCD_PASSWORD": etcd_password, 87 | "ETCD_PREFIX": etcd_prefix 88 | } 89 | context.test_env.setup_mysql_manager( 90 | {"name": name, "image": context.mysql_manager_image, "envs": envs}, 91 | remote={ 92 | "host": rhost, 93 | "user": ruser, 94 | "password": rpassword, 95 | "port": rport, 96 | } 97 | ) 98 | 99 | @given('setup etcd with name {name:w} and image: {image}') 100 | def start_etcd(context, name, image): 101 | context.test_env.setup_etcd( 102 | {"name": name, "image": image} 103 | ) 104 | 105 | @given('setup user root with password: {password} for etcd') 106 | def setup_root_user_for_etcd(context, password): 107 | context.test_env.etcd.exec( 108 | f'etcdctl user add root --new-user-password=f"{password}"', 109 | ) 110 | context.test_env.etcd.exec( 111 | 'etcdctl user grant-role root root' 112 | ) 113 | 114 | @given('setup user {name:w} for etcd with password: {password} access to path {path}') 115 | def setup_user_for_etcd(context, name, password, path): 116 | context.test_env.etcd.exec( 117 | f'etcdctl user add {name} --new-user-password="{password}"' 118 | ) 119 | 120 | context.test_env.etcd.exec( 121 | f'etcdctl role add {name}' 122 | ) 123 | 124 | context.test_env.etcd.exec( 125 | f'etcdctl role grant-permission {name} --prefix=true readwrite {path}' 126 | ) 127 | 128 | context.test_env.etcd.exec( 129 | f'etcdctl user grant-role {name} {name}' 130 | ) 131 | 132 | context.test_env.etcd.exec( 133 | f'etcdctl auth enable' 134 | ) 135 | 136 | @given('init mysql cluster spec') 137 | def 
init_mysql_cluster_spec(context,): 138 | context.test_env.mysql_manager.exec( 139 | f'python cli/mysql-cli.py init -f /etc/mm/cluster-spec.yaml' 140 | ) 141 | 142 | @given('init mysql cluster spec standby of remote mysql') 143 | def init_mysql_cluster_spec_with_remote(context,): 144 | context.test_env.mysql_manager.exec( 145 | f'python cli/mysql-cli.py init -f /etc/mm/cluster-spec.yaml --standby' 146 | ) 147 | 148 | @given('promote mysql cluster') 149 | def promote_mysql_cluster(context,): 150 | context.test_env.mysql_manager.exec( 151 | f'python cli/mysql-cli.py promote' 152 | ) 153 | 154 | @given('change fail interval to {n:d} seconds') 155 | def change_file_interval(context, n): 156 | context.test_env.mysql_manager.exec( 157 | f'python cli/mysql-cli.py set-fail-interval {n}' 158 | ) 159 | 160 | @given('add mysql to cluster with host: {host} and name: {name} and user: {user} and password: {password}') 161 | def add_mysql_to_cluster(context, host, user, password, name): 162 | context.test_env.mysql_manager.exec( 163 | f"python cli/mysql-cli.py add -h {host} -u {user} -p {password} -n {name}" 164 | ) 165 | 166 | @given('remove mysql with name: {name}') 167 | def remove_mysql_from_cluster(context, name): 168 | context.test_env.mysql_manager.exec( 169 | f"python cli/mysql-cli.py remove -n {name}" 170 | ) 171 | 172 | @given('stop mysql with server_id {server_id:d}') 173 | def stop_mysql(context, server_id): 174 | context.test_env.stop_mysql(server_id) 175 | 176 | @given('start mysql with server_id {server_id:d}') 177 | def start_mysql(context, server_id): 178 | context.test_env.start_mysql(server_id) 179 | 180 | @given('restart mysql manager with env ETCD_HOST={etcd_host:w} ETCD_USERNAME={etcd_username:w} ETCD_PASSWORD={etcd_password:w} ETCD_PREFIX={etcd_prefix}') 181 | def restart_mysql_manager(context, etcd_host, etcd_username, etcd_password, etcd_prefix): 182 | envs = { 183 | "ETCD_HOST": etcd_host, 184 | "ETCD_USERNAME": etcd_username, 185 | "ETCD_PASSWORD": 
etcd_password, 186 | "ETCD_PREFIX": etcd_prefix, 187 | } 188 | context.test_env.restart_mysql_manager(envs) 189 | 190 | @step('execute mysql query with user: {user:w}, password: {password:w}, host: {host} and port: {port} query: {query}') 191 | def exec_query(context, user, password, host, port, query): 192 | mysql = context.test_env.get_one_up_mysql() 193 | command = f"""mysql -u{user} -p{password} -h {host} -P {port} -e "{query}" 194 | """ 195 | mysql.exec(command) 196 | 197 | @then('result of query: "{query}" with user: {user:w} and password: {password: w} on host: {host} and port: {port} should be') 198 | def evaluate_query_result(context, query, user, password, host, port): 199 | expected_result = context.text 200 | mysql = context.test_env.get_one_up_mysql() 201 | command = f"""mysql -u{user} -p{password} -h {host} -P {port} -X -e "{query}" 202 | """ 203 | output = mysql.exec(command).output.decode() 204 | logger.log(level=1, msg=output) 205 | output = output.split("mysql: [Warning] Using a password on the command line interface can be insecure.\n") 206 | output = output[1] 207 | assert xmltodict.parse(output) == xmltodict.parse(expected_result) 208 | 209 | 210 | @then('cluster status must be') 211 | def evaluate_cluster_status(context): 212 | expected_result = context.text 213 | output = context.test_env.mysql_manager.exec( 214 | "python cli/mysql-cli.py mysql get-cluster-status" 215 | ).output.decode() 216 | logger.log(level=1, msg=output) 217 | assert output == expected_result 218 | 219 | @then('logs of mm must contain') 220 | def search_for_logs_in_mm(context,): 221 | error_text = context.text 222 | assert error_text in context.test_env.mysql_manager.logs() 223 | -------------------------------------------------------------------------------- /tests/features/two-mysqls-and-haproxy.feature: -------------------------------------------------------------------------------- 1 | Feature: two-mysqls-and-two-haproxies 2 | Setup two mysqls and two haproxies with 
mm 3 | 4 | Scenario: check start two mysqls with two haproxies 5 | Given setup etcd with name etcd and image: quay.hamdocker.ir/coreos/etcd:v3.5.9-amd64 6 | And setup user root with password: password for etcd 7 | And setup user mm for etcd with password: password access to path mm/cluster1/ 8 | And setup default mysql with server_id 1 9 | And setup default mysql with server_id 2 10 | And setup mysql_manager with name mm with env ETCD_HOST=etcd ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 11 | And setup haproxy with name hap1 with env ETCD_HOST=http://etcd:2379 ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 12 | And setup haproxy with name hap2 with env ETCD_HOST=http://etcd:2379 ETCD_USERNAME=mm ETCD_PASSWORD=password ETCD_PREFIX=mm/cluster1/ 13 | And init mysql cluster spec 14 | And sleep 30 seconds 15 | Then cluster status must be 16 | """ 17 | source=up 18 | replica=up 19 | 20 | """ 21 | When execute mysql query with user: hamadmin, password: password, host: hap2 and port: 3306 query: use hamdb; CREATE TABLE t1 (c1 INT PRIMARY KEY, c2 TEXT NOT NULL);INSERT INTO t1 VALUES (1, 'Luis'); 22 | Then result of query: "select @@global.server_id;" with user: hamadmin and password: password on host: hap1 and port: 3306 should be 23 | """ 24 | 25 | 26 | 27 | 28 | 1 29 | 30 | 31 | """ 32 | Then result of query: "select @@global.server_id;" with user: hamadmin and password: password on host: hap2 and port: 3306 should be 33 | """ 34 | 35 | 36 | 37 | 38 | 1 39 | 40 | 41 | """ 42 | Then result of query: "select @@global.server_id;" with user: hamadmin and password: password on host: hap1 and port: 3307 should be 43 | """ 44 | 45 | 46 | 47 | 48 | 2 49 | 50 | 51 | """ 52 | Then result of query: "select @@global.server_id;" with user: hamadmin and password: password on host: hap2 and port: 3307 should be 53 | """ 54 | 55 | 56 | 57 | 58 | 2 59 | 60 | 61 | """ 62 | Then result of query: "select * from hamdb.t1;" with user: hamadmin and 
password: password on host: hap1 and port: 3306 should be 63 | """ 64 | 65 | 66 | 67 | 68 | 1 69 | Luis 70 | 71 | 72 | """ 73 | 74 | Then result of query: "select * from hamdb.t1;" with user: hamadmin and password: password on host: hap2 and port: 3306 should be 75 | """ 76 | 77 | 78 | 79 | 80 | 1 81 | Luis 82 | 83 | 84 | """ 85 | 86 | Then result of query: "select * from hamdb.t1;" with user: root and password: root on host: mysql-s1 and port: 3306 should be 87 | """ 88 | 89 | 90 | 91 | 92 | 1 93 | Luis 94 | 95 | 96 | """ 97 | 98 | Then result of query: "select * from hamdb.t1;" with user: root and password: root on host: mysql-s2 and port: 3306 should be 99 | """ 100 | 101 | 102 | 103 | 104 | 1 105 | Luis 106 | 107 | 108 | """ 109 | 110 | # Then result of query: "USE mysql; SHOW EVENTS;" with user: root and password: root on host: mysql-s1 and port: 3306 should be 111 | # """ 112 | # """ 113 | 114 | Then result of query: "SELECT user FROM mysql.user;" with user: root and password: root on host: mysql-s1 and port: 3306 should be 115 | """ 116 | 117 | 118 | 119 | 120 | exporter 121 | 122 | 123 | 124 | hamadmin 125 | 126 | 127 | 128 | replica 129 | 130 | 131 | 132 | root 133 | 134 | 135 | 136 | mysql.infoschema 137 | 138 | 139 | 140 | mysql.session 141 | 142 | 143 | 144 | mysql.sys 145 | 146 | 147 | 148 | root 149 | 150 | 151 | """ 152 | 153 | Then result of query: "show grants for hamadmin;" with user: root and password: root on host: mysql-s1 and port: 3306 should be 154 | """ 155 | 156 | 157 | 158 | 159 | GRANT SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, RELOAD, SHUTDOWN, PROCESS, FILE, REFERENCES, INDEX, ALTER, SHOW DATABASES, CREATE TEMPORARY TABLES, LOCK TABLES, EXECUTE, REPLICATION SLAVE, REPLICATION CLIENT, CREATE VIEW, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, CREATE USER, EVENT, TRIGGER, CREATE TABLESPACE, CREATE ROLE, DROP ROLE ON *.* TO `hamadmin`@`%` WITH GRANT OPTION 160 | 161 | 162 | 163 | GRANT 
APPLICATION_PASSWORD_ADMIN,AUDIT_ABORT_EXEMPT,AUDIT_ADMIN,AUTHENTICATION_POLICY_ADMIN,BACKUP_ADMIN,BINLOG_ADMIN,BINLOG_ENCRYPTION_ADMIN,CLONE_ADMIN,ENCRYPTION_KEY_ADMIN,FIREWALL_EXEMPT,FLUSH_OPTIMIZER_COSTS,FLUSH_STATUS,FLUSH_TABLES,FLUSH_USER_RESOURCES,GROUP_REPLICATION_ADMIN,GROUP_REPLICATION_STREAM,INNODB_REDO_LOG_ARCHIVE,INNODB_REDO_LOG_ENABLE,PASSWORDLESS_USER_ADMIN,PERSIST_RO_VARIABLES_ADMIN,REPLICATION_APPLIER,RESOURCE_GROUP_ADMIN,RESOURCE_GROUP_USER,ROLE_ADMIN,SENSITIVE_VARIABLES_OBSERVER,SERVICE_CONNECTION_ADMIN,SESSION_VARIABLES_ADMIN,SET_USER_ID,SHOW_ROUTINE,SYSTEM_USER,SYSTEM_VARIABLES_ADMIN,TABLE_ENCRYPTION_ADMIN,TELEMETRY_LOG_ADMIN,XA_RECOVER_ADMIN ON *.* TO `hamadmin`@`%` WITH GRANT OPTION 164 | 165 | 166 | """ 167 | 168 | Then result of query: "show databases;" with user: root and password: root on host: mysql-s1 and port: 3306 should be 169 | """ 170 | 171 | 172 | 173 | 174 | hamdb 175 | 176 | 177 | 178 | information_schema 179 | 180 | 181 | 182 | mysql 183 | 184 | 185 | 186 | performance_schema 187 | 188 | 189 | 190 | sys 191 | 192 | 193 | 194 | test 195 | 196 | 197 | """ 198 | -------------------------------------------------------------------------------- /tests/integration_test/environment/component_provider.py: -------------------------------------------------------------------------------- 1 | from typing import Type, Union 2 | 3 | from testcontainers.core.container import Network 4 | from testcontainers.core.generic import DockerContainer 5 | 6 | 7 | class ComponentProvider: 8 | def __init__( 9 | self, 10 | name:str, 11 | network: Network, 12 | image: str, 13 | component: Type[DockerContainer], 14 | component_kwargs: dict = {}, 15 | is_up: bool = True 16 | ) -> None: 17 | self.name = name 18 | self.image = image 19 | self.network = network 20 | self.component = component(image, **component_kwargs) 21 | self.is_up = is_up 22 | 23 | def setup(self) -> None: 24 | self.component.with_name( 25 | self.name 26 | ).with_network( 27 | self.network 28 
| ).with_network_aliases( 29 | self.name 30 | ).with_kwargs( 31 | restart_policy={"Name": "always"} 32 | ) 33 | 34 | def start(self): 35 | self.is_up = True 36 | self.component.start() 37 | 38 | def destroy(self): 39 | self.is_up = False 40 | self.component.stop() 41 | 42 | def exec(self, command): 43 | return self.component.exec(command) 44 | 45 | def logs(self,): 46 | # This function returns std_err and std_out errors at the same time 47 | return self.component.get_logs()[0].decode() + self.component.get_logs()[1].decode() 48 | 49 | def set_env(self, envs: dict): 50 | for key, value in envs.items(): 51 | self.component.with_env(key, value) 52 | 53 | -------------------------------------------------------------------------------- /tests/integration_test/environment/etcd/etcd_container_provider.py: -------------------------------------------------------------------------------- 1 | import os 2 | from testcontainers.core.container import Network 3 | from testcontainers.mysql import MySqlContainer 4 | from tests.integration_test.environment.component_provider import ComponentProvider 5 | from testcontainers.core.generic import DockerContainer 6 | 7 | 8 | class EtcdContainerProvider(ComponentProvider): 9 | def __init__(self, 10 | name: str, 11 | network: Network, 12 | image: str, 13 | ) -> None: 14 | super().__init__( 15 | name=name, 16 | network=network, 17 | image=image, 18 | component=DockerContainer, 19 | ) 20 | 21 | def setup(self): 22 | super().setup() 23 | self.component.with_command( 24 | [ 25 | "etcd", 26 | "--name=mm-etcd", 27 | f"--advertise-client-urls=http://{self.name}:2379", 28 | "--initial-cluster-token=etcd-cluster", 29 | "--initial-cluster-state=new", 30 | "--listen-client-urls=http://0.0.0.0:2379", 31 | "--listen-metrics-urls=http://0.0.0.0:2381", 32 | "--listen-peer-urls=http://0.0.0.0:2380", 33 | "--auto-compaction-mode=revision", 34 | "--auto-compaction-retention=5" 35 | ] 36 | ).with_exposed_ports( 37 | 2379, 2380, 2381 38 | ) 39 | 
-------------------------------------------------------------------------------- /tests/integration_test/environment/haproxy/haproxy_container_provider.py: -------------------------------------------------------------------------------- 1 | import os 2 | from testcontainers.core.generic import DockerContainer 3 | from testcontainers.core.network import Network 4 | from tests.integration_test.environment.component_provider import ComponentProvider 5 | 6 | 7 | class HAProxyContainerProvider(ComponentProvider): 8 | def __init__(self, 9 | name: str, 10 | image: str, 11 | network: Network, 12 | ) -> None: 13 | super().__init__( 14 | name=name, 15 | network=network, 16 | image=image, 17 | component=DockerContainer 18 | ) 19 | 20 | def setup(self): 21 | super().setup() 22 | 23 | -------------------------------------------------------------------------------- /tests/integration_test/environment/mysql/mysql_container_provider.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | from typing import Union 4 | from testcontainers.core.container import Network 5 | from testcontainers.mysql import MySqlContainer 6 | from tests.integration_test.environment.component_provider import ComponentProvider 7 | 8 | 9 | class MysqlContainerProvider(ComponentProvider): 10 | def __init__(self, 11 | name: str, 12 | server_id: int, 13 | network: Network, 14 | image: str, 15 | config: str, 16 | root_username: str = "root", 17 | root_password: str = "root", 18 | ) -> None: 19 | super().__init__( 20 | name=name, 21 | network=network, 22 | image=image, 23 | component=MySqlContainer, 24 | component_kwargs={ 25 | "username": root_username, 26 | "password": root_password 27 | } 28 | ) 29 | self.root_username = root_username 30 | self.root_password = root_password 31 | self.server_id = server_id 32 | self.config = config 33 | 34 | def _write_config(self) -> str: 35 | config_path = f"/tmp/configs/mysql/mysql_{self.server_id}.cnf" 36 
| os.makedirs(os.path.dirname(config_path), exist_ok=True) 37 | with open(config_path, "w") as f: 38 | f.writelines(self.config) 39 | return config_path 40 | 41 | def setup(self): 42 | super().setup() 43 | config_path = self._write_config() 44 | self.component.with_volume_mapping( 45 | host=config_path, 46 | container="/etc/mysql/conf.d/mysql.cnf" 47 | ) 48 | -------------------------------------------------------------------------------- /tests/integration_test/environment/mysql_manager/mysql_manager_container_provider.py: -------------------------------------------------------------------------------- 1 | import os 2 | from testcontainers.core.generic import DockerContainer 3 | from testcontainers.core.image import DockerImage 4 | from testcontainers.core.network import Network 5 | from tests.integration_test.environment.component_provider import ComponentProvider 6 | 7 | 8 | class MysqlManagerContainerProvider(ComponentProvider): 9 | def __init__(self, 10 | name: str, 11 | image: str, 12 | network: Network, 13 | config: str 14 | ) -> None: 15 | super().__init__( 16 | name=name, 17 | network=network, 18 | image=image, 19 | component=DockerContainer 20 | ) 21 | self.config = config 22 | 23 | def _write_config(self) -> str: 24 | config_path = "/tmp/configs/mm/cluster-spec.yaml" 25 | os.makedirs(os.path.dirname(config_path), exist_ok=True) 26 | with open(config_path, "w") as f: 27 | f.writelines(self.config) 28 | return config_path 29 | 30 | def setup(self): 31 | super().setup() 32 | config_path = self._write_config() 33 | self.component.with_volume_mapping( 34 | host=config_path, 35 | container="/etc/mm/cluster-spec.yaml" 36 | ) 37 | -------------------------------------------------------------------------------- /tests/integration_test/environment/proxysql/proxysql_container_provider.py: -------------------------------------------------------------------------------- 1 | import os 2 | from testcontainers.core.container import DockerContainer, Network 3 | from 
tests.integration_test.environment.component_provider import ComponentProvider 4 | 5 | 6 | class ProxysqlContainerProvider(ComponentProvider): 7 | def __init__(self, 8 | name: str, 9 | network: Network, 10 | image: str, 11 | local_username: str, 12 | local_password: str, 13 | remote_username: str, 14 | remote_password: str, 15 | config: str 16 | ) -> None: 17 | super().__init__( 18 | name=name, 19 | network=network, 20 | image=image, 21 | component=DockerContainer 22 | ) 23 | self.local_username = local_username 24 | self.local_password = local_password 25 | self.remote_username = remote_username 26 | self.remote_password = remote_password 27 | self.config = config 28 | 29 | def _write_config(self) -> str: 30 | config_path = os.path.join( 31 | os.getcwd(), 32 | "configs/proxysql/proxysql.cnf" 33 | ) 34 | os.makedirs(os.path.dirname(config_path), exist_ok=True) 35 | with open(config_path, "w") as f: 36 | f.writelines(self.config) 37 | return config_path 38 | 39 | def setup(self): 40 | super().setup() 41 | config_path = self._write_config() 42 | self.component.with_volume_mapping( 43 | host=config_path, 44 | container="/etc/proxysql.cnf" 45 | ) 46 | -------------------------------------------------------------------------------- /tests/integration_test/environment/test_environment_factory.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | from testcontainers.core.container import Network 3 | from tests.integration_test.environment.etcd.etcd_container_provider import EtcdContainerProvider 4 | from tests.integration_test.environment.mysql.mysql_container_provider import MysqlContainerProvider 5 | from tests.integration_test.environment.mysql_manager.mysql_manager_container_provider import MysqlManagerContainerProvider 6 | from tests.integration_test.environment.proxysql.proxysql_container_provider import ProxysqlContainerProvider 7 | from tests.integration_test.environment.haproxy.haproxy_container_provider import ( 
8 | HAProxyContainerProvider 9 | ) 10 | 11 | 12 | class TestEnvironmentFactory: 13 | def __init__( 14 | self, 15 | ) -> None: 16 | self.mysqls = [] 17 | self.proxysqls = [] 18 | self.mysql_manager = None 19 | self.etcd = None 20 | self.haproxys = [] 21 | self.remote = None 22 | self.network = Network().create() 23 | 24 | def _get_default_mysql_config_template(self): 25 | return """[mysqld] 26 | server-id = {} 27 | gtid-mode = ON 28 | enforce-gtid-consistency = ON 29 | log-bin = binlog 30 | relay-log = relaylog 31 | datadir = /var/lib/mysql 32 | binlog_expire_logs_seconds = 259200 33 | binlog_expire_logs_auto_purge = ON 34 | max_binlog_size = 104857600 35 | slow_query_log = 1 36 | long_query_time = 1 37 | slow_query_log_file = /var/lib/mysql/slow.log 38 | max_connections = 1000 39 | """ 40 | 41 | def _get_mysql_manager_config(self, remote: dict=None): 42 | config = { 43 | "mysqls": {} 44 | } 45 | for i, mysql in enumerate(self.mysqls): 46 | config["mysqls"]["s"+str(i+1)] = { 47 | "host": mysql.name, 48 | "user": mysql.root_username, 49 | "password": mysql.root_password 50 | } 51 | 52 | config["users"] = { 53 | "replPassword": "password", 54 | "exporterPassword": "exporter", 55 | "nonprivPassword": "password", 56 | "nonprivUser": "hamadmin", 57 | } 58 | 59 | if remote is not None: 60 | config["remote"] = remote 61 | 62 | return yaml.safe_dump(config) 63 | 64 | def get_one_up_mysql(self): 65 | for mysql in self.mysqls: 66 | if mysql.is_up: 67 | return mysql 68 | 69 | def setup_mysql(self, mysql: dict, config: str | None = None): 70 | self.setup_mysql_with_name(mysql, f"mysql-s{mysql['server_id']}", config) 71 | 72 | def setup_mysql_with_name(self, mysql, name: str, config: str | None = None): 73 | if config is None: 74 | config = self._get_default_mysql_config_template().format(mysql["server_id"]) 75 | component = MysqlContainerProvider( 76 | server_id=mysql["server_id"], 77 | name=name, 78 | network=self.network, 79 | image=mysql["image"], 80 | config=config 81 | ) 
82 | if name == "remote": 83 | self.remote = component 84 | else: 85 | self.mysqls.append( 86 | component 87 | ) 88 | 89 | component.setup() 90 | component.start() 91 | 92 | def setup_proxysql(self, proxysql): 93 | component = ProxysqlContainerProvider( 94 | name=proxysql["name"], 95 | network=self.network, 96 | image=proxysql["image"], 97 | local_username=proxysql["local_username"], 98 | local_password=proxysql["local_password"], 99 | remote_username=proxysql["remote_username"], 100 | remote_password=proxysql["remote_password"], 101 | config=self._get_default_proxysql_config_template().format( 102 | proxysql["local_username"], 103 | proxysql["local_password"], 104 | proxysql["remote_username"], 105 | proxysql["remote_password"], 106 | ) 107 | ) 108 | self.proxysqls.append( 109 | component 110 | ) 111 | component.setup() 112 | component.start() 113 | 114 | def setup_mysql_manager(self, mysql_manager, remote: dict=None): 115 | self.mysql_manager = MysqlManagerContainerProvider( 116 | name=mysql_manager["name"], 117 | network=self.network, 118 | image=mysql_manager["image"], 119 | config=self._get_mysql_manager_config(remote) 120 | ) 121 | self.mysql_manager.set_env(mysql_manager["envs"]) 122 | self.mysql_manager.setup() 123 | self.mysql_manager.start() 124 | 125 | def setup_haproxy(self, haproxy): 126 | component = HAProxyContainerProvider( 127 | name=haproxy["name"], 128 | network=self.network, 129 | image=haproxy["image"], 130 | ) 131 | component.set_env(haproxy["envs"]) 132 | self.haproxys.append(component) 133 | component.setup() 134 | component.start() 135 | 136 | def setup_etcd(self, etcd): 137 | self.etcd = EtcdContainerProvider( 138 | name=etcd["name"], 139 | network=self.network, 140 | image=etcd["image"], 141 | ) 142 | self.etcd.setup() 143 | self.etcd.start() 144 | 145 | def stop(self): 146 | for mysql in self.mysqls: 147 | mysql.destroy() 148 | for haproxy in self.haproxys: 149 | haproxy.destroy() 150 | self.mysql_manager.destroy() 151 | 
self.etcd.destroy() 152 | if self.remote is not None: 153 | self.remote.destroy() 154 | self.network.remove() 155 | 156 | def stop_mysql(self, server_id: int): 157 | for mysql in self.mysqls: 158 | if mysql.server_id == server_id: 159 | mysql.destroy() 160 | 161 | def start_mysql(self, server_id: int): 162 | for mysql in self.mysqls: 163 | if mysql.server_id == server_id: 164 | mysql.setup() 165 | mysql.start() 166 | 167 | def restart_mysql_manager(self, envs): 168 | self.mysql_manager.destroy() 169 | self.setup_mysql_manager( 170 | { 171 | "name": self.mysql_manager.name, 172 | "image": self.mysql_manager.image, 173 | "envs": envs, 174 | } 175 | ) 176 | -------------------------------------------------------------------------------- /tests/k8s/mysql-1-proxysql-components.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 6 | component: mysql 7 | release: release-all 8 | name: release-all-mysql-s1-data 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 1Gi 15 | storageClassName: rawfile-ext4 16 | --- 17 | apiVersion: v1 18 | kind: ConfigMap 19 | data: 20 | mysql.cnf: | 21 | [mysqld] 22 | server-id = 1 23 | gtid-mode = ON 24 | enforce-gtid-consistency = ON 25 | log-bin = binlog 26 | relay-log = relaylog 27 | datadir = /var/lib/mysql 28 | binlog_expire_logs_seconds = 259200 29 | binlog_expire_logs_auto_purge = ON 30 | max_binlog_size = 104857600 31 | slow_query_log = 1 32 | long_query_time = 1 33 | slow_query_log_file = /var/lib/mysql/slow.log 34 | max_connections = 1000 35 | metadata: 36 | labels: 37 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 38 | component: mysql 39 | release: release-all 40 | name: release-all-mysql-s1-config 41 | --- 42 | apiVersion: apps/v1 43 | kind: StatefulSet 44 | metadata: 45 | labels: 46 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 
47 | component: mysql 48 | mysql-replication: gtid-async 49 | mysql-server-id: "1" 50 | release: release-all 51 | name: release-all-mysql-s1 52 | spec: 53 | replicas: 1 54 | selector: 55 | matchLabels: 56 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 57 | component: mysql 58 | mysql-replication: gtid-async 59 | mysql-server-id: "1" 60 | release: release-all 61 | serviceName: mysql-s1 62 | template: 63 | metadata: 64 | labels: 65 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 66 | component: mysql 67 | mysql-replication: gtid-async 68 | mysql-server-id: "1" 69 | release: release-all 70 | spec: 71 | affinity: 72 | podAntiAffinity: 73 | preferredDuringSchedulingIgnoredDuringExecution: 74 | - podAffinityTerm: 75 | labelSelector: 76 | matchExpressions: 77 | - key: component 78 | operator: In 79 | values: 80 | - mysql 81 | - key: release 82 | operator: In 83 | values: 84 | - release-all 85 | topologyKey: kubernetes.io/hostname 86 | weight: 100 87 | requiredDuringSchedulingIgnoredDuringExecution: [] 88 | containers: 89 | - env: 90 | - name: MYSQL_ROOT_PASSWORD 91 | value: password 92 | image: hub.hamdocker.ir/library/mysql:8.0.35-bullseye 93 | imagePullPolicy: IfNotPresent 94 | name: main 95 | ports: 96 | - containerPort: 3306 97 | name: mysql 98 | protocol: TCP 99 | resources: 100 | limits: 101 | cpu: 800m 102 | ephemeral-storage: 500Mi 103 | memory: 1000M 104 | requests: 105 | cpu: 40m 106 | ephemeral-storage: 50Mi 107 | memory: 80M 108 | volumeMounts: 109 | - mountPath: /etc/mysql/conf.d/ 110 | name: config 111 | - mountPath: /var/lib/mysql 112 | name: data 113 | enableServiceLinks: false 114 | restartPolicy: Always 115 | terminationGracePeriodSeconds: 60 116 | volumes: 117 | - configMap: 118 | name: release-all-mysql-s1-config 119 | name: config 120 | - name: data 121 | persistentVolumeClaim: 122 | claimName: release-all-mysql-s1-data 123 | volumeClaimTemplates: [] 124 | --- 125 | apiVersion: v1 126 | kind: Service 127 | metadata: 128 | labels: 129 | app-id: 
fd3g53e4-7216-563w-b511-9b38ebbf2hh2 130 | component: mysql 131 | release: release-all 132 | name: release-all-mysql-s1-svc 133 | spec: 134 | ports: 135 | - name: mysql 136 | port: 3306 137 | protocol: TCP 138 | targetPort: 3306 139 | selector: 140 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 141 | component: mysql 142 | mysql-server-id: "1" 143 | release: release-all 144 | type: ClusterIP 145 | --- 146 | apiVersion: v1 147 | kind: ConfigMap 148 | data: 149 | my.cnf: | 150 | [client] 151 | host = release-all-mysql-s1-svc 152 | user = exporter 153 | password = password 154 | metadata: 155 | labels: 156 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 157 | component: mysql_exporter 158 | release: release-all 159 | name: release-all-mysql-exporter-s1-config 160 | --- 161 | apiVersion: apps/v1 162 | kind: Deployment 163 | metadata: 164 | labels: 165 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 166 | component: mysql_exporter 167 | mysql-replication: gtid-async 168 | mysql-server-id: "1" 169 | release: release-all 170 | name: release-all-mysql-exporter-s1 171 | spec: 172 | replicas: 1 173 | selector: 174 | matchLabels: 175 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 176 | component: mysql_exporter 177 | mysql-replication: gtid-async 178 | mysql-server-id: "1" 179 | release: release-all 180 | template: 181 | metadata: 182 | labels: 183 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 184 | component: mysql_exporter 185 | mysql-replication: gtid-async 186 | mysql-server-id: "1" 187 | release: release-all 188 | spec: 189 | containers: 190 | - args: 191 | - --config.my-cnf=/etc/my.cnf 192 | env: [] 193 | image: hub.hamdocker.ir/prom/mysqld-exporter:v0.15.1 194 | imagePullPolicy: IfNotPresent 195 | name: main 196 | ports: 197 | - containerPort: 9104 198 | name: metrics 199 | protocol: TCP 200 | resources: 201 | limits: 202 | cpu: 100m 203 | ephemeral-storage: 100Mi 204 | memory: 200Mi 205 | requests: 206 | cpu: 10m 207 | ephemeral-storage: 10Mi 208 | memory: 20Mi 209 | 
volumeMounts: 210 | - mountPath: /etc/my.cnf 211 | name: config 212 | subPath: my.cnf 213 | enableServiceLinks: false 214 | restartPolicy: Always 215 | terminationGracePeriodSeconds: 60 216 | volumes: 217 | - configMap: 218 | name: release-all-mysql-exporter-s1-config 219 | name: config 220 | --- 221 | apiVersion: v1 222 | kind: Service 223 | metadata: 224 | labels: 225 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 226 | component: mysql_exporter 227 | mysql-server-id: "1" 228 | release: release-all 229 | name: release-all-mysql-exporter-s1-svc 230 | spec: 231 | ports: 232 | - name: metrics 233 | port: 9104 234 | protocol: TCP 235 | targetPort: 9104 236 | selector: 237 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 238 | component: mysql_exporter 239 | mysql-server-id: "1" 240 | release: release-all 241 | type: ClusterIP 242 | --- 243 | apiVersion: monitoring.coreos.com/v1 244 | kind: ServiceMonitor 245 | metadata: 246 | annotations: 247 | release-name: release-all 248 | release-namespace: dbaas-staging 249 | labels: 250 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 251 | app.kubernetes.io/managed-by: mysql 252 | name: release-all-mysql-s1-exporter 253 | namespace: dbaas-staging 254 | spec: 255 | endpoints: 256 | - metricRelabelings: 257 | - action: replace 258 | regex: (.*) 259 | replacement: mysql 260 | separator: ; 261 | targetLabel: dbaas_service 262 | path: /metrics 263 | targetPort: metrics 264 | namespaceSelector: 265 | matchNames: 266 | - production 267 | selector: 268 | matchLabels: 269 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 270 | component: mysql_exporter 271 | mysql-server-id: "1" 272 | release: release-all 273 | --- 274 | apiVersion: monitoring.coreos.com/v1 275 | kind: ServiceMonitor 276 | metadata: 277 | annotations: 278 | release-name: release-all 279 | release-namespace: dbaas-staging 280 | labels: 281 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 282 | app.kubernetes.io/managed-by: mysql 283 | name: release-all-proxysql 284 | namespace: 
dbaas-staging 285 | spec: 286 | endpoints: 287 | - metricRelabelings: 288 | - action: replace 289 | regex: (.*) 290 | replacement: mysql 291 | separator: ; 292 | targetLabel: dbaas_service 293 | path: /metrics 294 | targetPort: metrics 295 | namespaceSelector: 296 | matchNames: 297 | - production 298 | selector: 299 | matchLabels: 300 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 301 | component: proxysql 302 | release: release-all 303 | --- 304 | apiVersion: v1 305 | kind: PersistentVolumeClaim 306 | metadata: 307 | labels: 308 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 309 | component: proxysql 310 | release: release-all 311 | name: release-all-proxysql-data 312 | spec: 313 | accessModes: 314 | - ReadWriteOnce 315 | resources: 316 | requests: 317 | storage: 100Mi 318 | storageClassName: rawfile-ext4 319 | --- 320 | apiVersion: v1 321 | kind: ConfigMap 322 | data: 323 | proxysql.cnf: | 324 | datadir="/var/lib/proxysql" 325 | admin_variables= 326 | { 327 | admin_credentials="admin:px-admin;radmin:px-admin" 328 | mysql_ifaces="0.0.0.0:6032" 329 | restapi_enabled=true 330 | restapi_port=6070 331 | } 332 | mysql_variables= 333 | { 334 | threads=4 335 | max_connections=2000 336 | default_query_delay=0 337 | default_query_timeout=360000 338 | interfaces="0.0.0.0:3306" 339 | default_schema="information_schema" 340 | stacksize=1048576 341 | server_version="5.5.30" 342 | connect_timeout_server=1000 343 | connect_retries_on_failure=3 344 | connect_timeout_server_max=10000 345 | monitor_history=600000 346 | monitor_connect_interval=60000 347 | monitor_ping_interval=10000 348 | monitor_read_only_interval=1000 349 | monitor_read_only_timeout=800 350 | ping_interval_server_msec=10000 351 | ping_timeout_server=500 352 | commands_stats=true 353 | sessions_sort=true 354 | } 355 | metadata: 356 | labels: 357 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 358 | component: proxysql 359 | release: release-all 360 | name: release-all-proxysql-config 361 | --- 362 | apiVersion: 
apps/v1 363 | kind: StatefulSet 364 | metadata: 365 | labels: 366 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 367 | component: proxysql 368 | mysql-replication: gtid-async 369 | release: release-all 370 | name: release-all-proxysql 371 | spec: 372 | replicas: 1 373 | selector: 374 | matchLabels: 375 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 376 | component: proxysql 377 | mysql-replication: gtid-async 378 | release: release-all 379 | serviceName: proxysql 380 | template: 381 | metadata: 382 | labels: 383 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 384 | component: proxysql 385 | mysql-replication: gtid-async 386 | release: release-all 387 | spec: 388 | containers: 389 | - env: [] 390 | image: hub.hamdocker.ir/proxysql/proxysql:2.6.2 391 | imagePullPolicy: IfNotPresent 392 | name: main 393 | ports: 394 | - containerPort: 3306 395 | name: mysql 396 | protocol: TCP 397 | - containerPort: 6032 398 | name: admin 399 | protocol: TCP 400 | - containerPort: 6070 401 | name: metrics 402 | protocol: TCP 403 | resources: 404 | limits: 405 | cpu: 100m 406 | ephemeral-storage: 500Mi 407 | memory: 300M 408 | requests: 409 | cpu: 40m 410 | ephemeral-storage: 50Mi 411 | memory: 80M 412 | volumeMounts: 413 | - mountPath: /etc/proxysql.cnf 414 | name: config 415 | subPath: proxysql.cnf 416 | - mountPath: /var/lib/proxysql 417 | name: data 418 | enableServiceLinks: false 419 | restartPolicy: Always 420 | terminationGracePeriodSeconds: 60 421 | volumes: 422 | - configMap: 423 | name: release-all-proxysql-config 424 | name: config 425 | - name: data 426 | persistentVolumeClaim: 427 | claimName: release-all-proxysql-data 428 | volumeClaimTemplates: [] 429 | --- 430 | apiVersion: v1 431 | kind: Service 432 | metadata: 433 | labels: 434 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 435 | component: proxysql 436 | release: release-all 437 | name: release-all-proxysql-svc 438 | spec: 439 | ports: 440 | - name: mysql 441 | port: 3306 442 | protocol: TCP 443 | targetPort: 3306 444 
| - name: admin 445 | port: 6032 446 | protocol: TCP 447 | targetPort: 6032 448 | - name: metrics 449 | port: 6070 450 | protocol: TCP 451 | targetPort: 6070 452 | selector: 453 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 454 | component: proxysql 455 | release: release-all 456 | type: ClusterIP 457 | --- 458 | apiVersion: apps/v1 459 | kind: StatefulSet 460 | metadata: 461 | labels: 462 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 463 | component: backup 464 | mysql-replication: gtid-async 465 | release: release-all 466 | name: release-all-backup 467 | spec: 468 | replicas: 1 469 | selector: 470 | matchLabels: 471 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 472 | component: backup 473 | mysql-replication: gtid-async 474 | release: release-all 475 | serviceName: backup 476 | template: 477 | metadata: 478 | labels: 479 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 480 | component: backup 481 | mysql-replication: gtid-async 482 | release: release-all 483 | spec: 484 | containers: 485 | - command: 486 | - /bin/bash 487 | - -ec 488 | - sleep inf 489 | env: 490 | - name: AWS_SECRET_ACCESS_KEY 491 | value: walg-secret 492 | - name: AWS_ACCESS_KEY_ID 493 | value: walg-access 494 | - name: AWS_ENDPOINT 495 | value: https://dbaas.hs3.ir/ 496 | - name: AWS_S3_FORCE_PATH_STYLE 497 | value: "true" 498 | - name: WALG_MYSQL_DATASOURCE_NAME 499 | value: root:password@tcp(release-all-mysql-s1-svc:3306)/information_schema 500 | - name: WALG_S3_PREFIX 501 | value: s3://customer-1/ 502 | - name: WALG_STREAM_CREATE_COMMAND 503 | value: xtrabackup --backup --host=release-all-mysql-s1-svc --user=root --password=password --stream=xbstream --datadir=/var/lib/mysql 504 | - name: WALG_STREAM_RESTORE_COMMAND 505 | value: xbstream -x -C /var/lib/mysql 506 | - name: WALG_MYSQL_BACKUP_PREPARE_COMMAND 507 | value: xtrabackup --prepare --target-dir=/var/lib/mysql 508 | - name: WALG_MYSQL_BINLOG_DST 509 | value: /var/lib/mysql/__bdst 510 | image: 
registry.hamdocker.ir/public/walg-xtrabackup:v0.1-8.0.35 511 | imagePullPolicy: IfNotPresent 512 | name: main 513 | resources: 514 | limits: 515 | cpu: 400m 516 | ephemeral-storage: 500Mi 517 | memory: 800M 518 | requests: 519 | cpu: 40m 520 | ephemeral-storage: 50Mi 521 | memory: 80M 522 | volumeMounts: 523 | - mountPath: /var/lib/mysql 524 | name: data 525 | enableServiceLinks: false 526 | restartPolicy: Always 527 | terminationGracePeriodSeconds: 60 528 | volumes: 529 | - name: data 530 | persistentVolumeClaim: 531 | claimName: release-all-mysql-s1-data 532 | volumeClaimTemplates: [] 533 | --- 534 | apiVersion: apps/v1 535 | kind: Deployment 536 | metadata: 537 | labels: 538 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 539 | component: mysql_manager 540 | mysql-replication: gtid-async 541 | release: release-all 542 | name: release-all-mysql-manager 543 | spec: 544 | replicas: 1 545 | selector: 546 | matchLabels: 547 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 548 | component: mysql_manager 549 | mysql-replication: gtid-async 550 | release: release-all 551 | template: 552 | metadata: 553 | labels: 554 | app-id: fd3g53e4-7216-563w-b511-9b38ebbf2hh2 555 | component: mysql_manager 556 | mysql-replication: gtid-async 557 | release: release-all 558 | spec: 559 | containers: 560 | - command: 561 | - /bin/sleep 562 | - inf 563 | env: 564 | - name: MYSQL_S1_HOST 565 | value: release-all-mysql-s1-svc 566 | - name: MYSQL_S2_HOST 567 | value: release-all-mysql-s2-svc 568 | - name: MYSQL_ROOT_PASSWORD 569 | value: password 570 | - name: MYSQL_REPL_PASSWORD 571 | value: password 572 | - name: MYSQL_EXPORTER_PASSWORD 573 | value: password 574 | - name: PROXYSQL_HOST 575 | value: release-all-proxysql-svc 576 | - name: PROXYSQL_PASSWORD 577 | value: px-admin 578 | - name: PROXYSQL_MON_PASSWORD 579 | value: px-monitor 580 | image: registry.hamdocker.ir/public/mysql-manager:v0.2.0 581 | imagePullPolicy: Always 582 | name: main 583 | resources: 584 | limits: 585 | cpu: 50m 586 
| ephemeral-storage: 10Mi 587 | memory: 100M 588 | requests: 589 | cpu: 10m 590 | ephemeral-storage: 10Mi 591 | memory: 10M 592 | volumeMounts: [] 593 | enableServiceLinks: false 594 | restartPolicy: Always 595 | terminationGracePeriodSeconds: 60 596 | volumes: [] 597 | -------------------------------------------------------------------------------- /tests/k8s/mysql-1-proxysql.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MANIFEST_FILE=mysql-1-proxysql-components.yaml 4 | MYSQL_S1_POD_NAME=release-all-mysql-s1-0 5 | PROXYSQL_POD_NAME=release-all-proxysql-0 6 | MYSQL_MANAGER_DEPLOYMENT_NAME=release-all-mysql-manager 7 | 8 | check_namespace () { 9 | current_namespace=`kubectl config view --minify -o jsonpath='{..namespace}'` 10 | if [ $current_namespace != "dbaas-staging" ] 11 | then 12 | echo "Error: This is not dbaas-staging namespace. Change your namespace." 13 | exit 1 14 | fi 15 | } 16 | 17 | echo -e "Deleting objects..." 18 | kubectl delete -f $MANIFEST_FILE 2>/dev/null 19 | kubectl wait --for=delete -f $MANIFEST_FILE 20 | kubectl wait --for=delete pod/$MYSQL_S1_POD_NAME 21 | kubectl wait --for=delete pod/$PROXYSQL_POD_NAME 22 | 23 | 24 | echo -e "\n\nChecking cluster namespace..." 25 | check_namespace 26 | 27 | 28 | echo -e "\n\nDeploying manifests..." 29 | kubectl apply -f $MANIFEST_FILE 30 | kubectl wait --for=condition=Ready pod/$MYSQL_S1_POD_NAME 31 | kubectl wait --for=condition=Ready pod/$PROXYSQL_POD_NAME 32 | 33 | echo -e "\n\nWaiting for mysql to become ready..." 34 | sleep 80 35 | ## TODO: check mysql readiness using mysql manager 36 | 37 | echo -e "\n\nStarting cluster using mysql manager..." 
38 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py mysql add --host $MYSQL_S1_HOST --user root --password $MYSQL_ROOT_PASSWORD' 39 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py mysql create-user --host $MYSQL_S1_HOST --user replica --password $MYSQL_REPL_PASSWORD --roles "REPLICATION SLAVE"' 40 | 41 | echo -e "\nCreate monitoring user: " 42 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py mysql create-monitoring-user --host $MYSQL_S1_HOST --password $MYSQL_EXPORTER_PASSWORD' 43 | 44 | echo -e "\nProxysql operations: " 45 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py mysql create-user --host $MYSQL_S1_HOST --user proxysql --password $PROXYSQL_MON_PASSWORD --roles "USAGE,REPLICATION CLIENT"' 46 | 47 | echo -e "\nProxysql add: " 48 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py proxysql add --host $PROXYSQL_HOST --user radmin --password $PROXYSQL_PASSWORD' 49 | 50 | echo -e "\nProxysql initialize: " 51 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py proxysql initialize --host $PROXYSQL_HOST --mysql-user root --mysql-password $MYSQL_ROOT_PASSWORD --monitor-user exporter --monitor-password $MYSQL_EXPORTER_PASSWORD' 52 | 53 | echo -e "\nProxysql add-backend: " 54 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py proxysql add-backend --mysql-host $MYSQL_S1_HOST --proxysql-host $PROXYSQL_HOST --read-weight 1 --is-writer' 55 | 56 | 57 | 58 | echo -e "\n\nWrite to mysql through proxysql..." 
59 | kubectl exec $MYSQL_S1_POD_NAME -- mysql -uroot -ppassword -h release-all-proxysql-svc -e "create database sales; use sales; CREATE TABLE t1 (c1 INT PRIMARY KEY, c2 TEXT NOT NULL);INSERT INTO t1 VALUES (1, 'Luis');" 60 | 61 | 62 | echo -e "\n\nCheck data in master..." 63 | kubectl exec -it $MYSQL_S1_POD_NAME -- mysql -uroot -ppassword -e "select * from sales.t1" 64 | 65 | echo -e "\n\nChecking proxysql config and stats..." 66 | sleep 20 67 | kubectl exec -it $MYSQL_S1_POD_NAME -- mysql -uradmin -ppx-admin -h release-all-proxysql-svc -P6032 -e "select * from runtime_mysql_servers" 68 | kubectl exec -it $MYSQL_S1_POD_NAME -- mysql -uradmin -ppx-admin -h release-all-proxysql-svc -P6032 -e "SELECT * FROM monitor.mysql_server_connect_log ORDER BY time_start_us DESC LIMIT 6" 69 | 70 | 71 | echo -e "\n\nChecking metrics..." 72 | sleep 10 73 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c "curl release-all-mysql-exporter-s1-svc:9104/metrics | grep mysql_up" 74 | 75 | echo -e "\n\nDeleting objects..." 
76 | kubectl delete -f $MANIFEST_FILE 2>/dev/null 77 | kubectl wait --for=delete -f $MANIFEST_FILE 78 | -------------------------------------------------------------------------------- /tests/k8s/mysql-2-components.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/hamravesh/mysql-manager/02300bce2f1694a094d600960cec0fc3d3947775/tests/k8s/mysql-2-components.yaml -------------------------------------------------------------------------------- /tests/k8s/mysql-2-proxysql.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | MANIFEST_FILE=mysql-2-proxysql-components.yaml 4 | MYSQL_S1_POD_NAME=release-all-mysql-s1-0 5 | MYSQL_S2_POD_NAME=release-all-mysql-s2-0 6 | PROXYSQL_POD_NAME=release-all-proxysql-0 7 | MYSQL_MANAGER_DEPLOYMENT_NAME=release-all-mysql-manager 8 | 9 | check_namespace () { 10 | current_namespace=`kubectl config view --minify -o jsonpath='{..namespace}'` 11 | if [ $current_namespace != "dbaas-staging" ] 12 | then 13 | echo "Error: This is not dbaas-staging namespace. Change your namespace." 14 | exit 1 15 | fi 16 | } 17 | 18 | echo -e "Deleting objects..." 19 | kubectl delete -f $MANIFEST_FILE 2>/dev/null 20 | kubectl wait --for=delete -f $MANIFEST_FILE 21 | kubectl wait --for=delete pod/$MYSQL_S1_POD_NAME 22 | kubectl wait --for=delete pod/$PROXYSQL_POD_NAME 23 | 24 | 25 | echo -e "\n\nChecking cluster namespace..." 26 | check_namespace 27 | 28 | 29 | echo -e "\n\nDeploying manifests..." 30 | kubectl apply -f $MANIFEST_FILE 31 | kubectl wait --for=condition=Ready pod/$MYSQL_S1_POD_NAME 32 | kubectl wait --for=condition=Ready pod/$PROXYSQL_POD_NAME 33 | 34 | echo -e "\n\nWaiting for mysql to become ready..." 35 | sleep 80 36 | ## TODO: check mysql readiness using mysql manager 37 | 38 | echo -e "\n\nStarting cluster using mysql manager..." 
39 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py mysql add --host $MYSQL_S1_HOST --user root --password $MYSQL_ROOT_PASSWORD' 40 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py mysql add --host $MYSQL_S2_HOST --user root --password $MYSQL_ROOT_PASSWORD' 41 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py mysql create-user --host $MYSQL_S1_HOST --user replica --password $MYSQL_REPL_PASSWORD --roles "REPLICATION SLAVE"' 42 | 43 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py mysql add-replica --master $MYSQL_S1_HOST --replica $MYSQL_S2_HOST' 44 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py mysql start-replication --master $MYSQL_S1_HOST --replica $MYSQL_S2_HOST --repl-user replica --repl-password $MYSQL_REPL_PASSWORD' 45 | 46 | echo -e "\nCreate monitoring user: " 47 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py mysql create-monitoring-user --host $MYSQL_S1_HOST --password $MYSQL_EXPORTER_PASSWORD' 48 | 49 | echo -e "\nProxysql operations: " 50 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py mysql create-user --host $MYSQL_S1_HOST --user proxysql --password $PROXYSQL_MON_PASSWORD --roles "USAGE,REPLICATION CLIENT"' 51 | 52 | echo -e "\nProxysql add: " 53 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py proxysql add --host $PROXYSQL_HOST --user radmin --password $PROXYSQL_PASSWORD' 54 | 55 | echo -e "\nProxysql initialize: " 56 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py proxysql initialize --host $PROXYSQL_HOST --mysql-user root --mysql-password $MYSQL_ROOT_PASSWORD --monitor-user exporter --monitor-password 
$MYSQL_EXPORTER_PASSWORD' 57 | 58 | echo -e "\nProxysql add-backend: " 59 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py proxysql add-backend --mysql-host $MYSQL_S1_HOST --proxysql-host $PROXYSQL_HOST --read-weight 1 --is-writer' 60 | kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c 'python /app/cli/mysql-cli.py proxysql add-backend --mysql-host $MYSQL_S2_HOST --proxysql-host $PROXYSQL_HOST --read-weight 1' 61 | 62 | 63 | 64 | echo -e "\n\nWrite to mysql through proxysql..." 65 | kubectl exec $MYSQL_S1_POD_NAME -- mysql -uroot -ppassword -h release-all-proxysql-svc -e "create database sales; use sales; CREATE TABLE t1 (c1 INT PRIMARY KEY, c2 TEXT NOT NULL);INSERT INTO t1 VALUES (1, 'Luis');" 66 | 67 | 68 | echo -e "\n\nCheck data in master..." 69 | kubectl exec -it $MYSQL_S1_POD_NAME -- mysql -uroot -ppassword -e "select * from sales.t1" 70 | 71 | echo -e "\n\nCheck data in replica..." 72 | kubectl exec -it $MYSQL_S2_POD_NAME -- mysql -uroot -ppassword -e "select * from sales.t1" 73 | kubectl exec -it $MYSQL_S2_POD_NAME -- mysql -uroot -ppassword -e "select @@global.super_read_only" 74 | 75 | 76 | echo -e "\n\nChecking proxysql config and stats..." 77 | sleep 20 78 | kubectl exec -it $MYSQL_S1_POD_NAME -- mysql -uradmin -ppx-admin -h release-all-proxysql-svc -P6032 -e "select * from runtime_mysql_servers" 79 | kubectl exec -it $MYSQL_S1_POD_NAME -- mysql -uradmin -ppx-admin -h release-all-proxysql-svc -P6032 -e "SELECT * FROM monitor.mysql_server_connect_log ORDER BY time_start_us DESC LIMIT 6" 80 | kubectl exec -it $MYSQL_S1_POD_NAME -- mysql -uradmin -ppx-admin -h release-all-proxysql-svc -P6032 -e "select Queries, srv_host from stats_mysql_connection_pool\G" 81 | kubectl exec -it $MYSQL_S1_POD_NAME -- mysql -uradmin -ppx-admin -h release-all-proxysql-svc -P6032 -e "select * from stats_mysql_query_rules" 82 | 83 | 84 | echo -e "\n\nChecking metrics..." 
# Verify the exporter answers and reports the mysql_up gauge.
sleep 10
kubectl exec -it deploy/$MYSQL_MANAGER_DEPLOYMENT_NAME -- bash -c "curl release-all-mysql-exporter-s1-svc:9104/metrics | grep mysql_up"

echo -e "\n\nDeleting objects..."
kubectl delete -f $MANIFEST_FILE 2>/dev/null
kubectl wait --for=delete -f $MANIFEST_FILE
-------------------------------------------------------------------------------- /tests/setup-etcd.sh: -------------------------------------------------------------------------------- #!/bin/bash

# Bootstrap etcd auth for mysql-manager: a root superuser plus an "mm" user
# restricted to the mm/cluster1/ key prefix, then enable authentication.
# NOTE(review): setup_user is defined but never invoked in this file; run
# directly, this script is a no-op. Confirm whether a caller sources it and
# calls setup_user, or whether an invocation is missing at the end.
setup_user() {
    docker compose exec etcd etcdctl user add root --new-user-password="password"
    docker compose exec etcd etcdctl user grant-role root root
    docker compose exec etcd etcdctl user add mm --new-user-password="password"
    docker compose exec etcd etcdctl role add mm
    # Grant mm read/write on every key under the mm/cluster1/ prefix only.
    docker compose exec etcd etcdctl role grant-permission mm \
        --prefix=true readwrite mm/cluster1/
    docker compose exec etcd etcdctl user grant-role mm mm
    # Turning auth on requires the root user created above to already exist.
    docker compose exec etcd etcdctl auth enable
}