├── .github
└── workflows
│ ├── coverage.yml
│ ├── lint.yml
│ ├── pytest.yml
│ └── release.yml
├── .gitignore
├── .mariadb
├── my.cnf
└── no_encryption_key.key
├── .mysql
└── dev.mysql.com.gpg.key
├── .pre-commit-config.yaml
├── .readthedocs.yaml
├── .travis.yml
├── CHANGELOG
├── CONTRIBUTING.md
├── README.md
├── TODO
├── docker-compose-test.yml
├── docker-compose.yml
├── docs
├── Makefile
├── binlogstream.rst
├── conf.py
├── developement.rst
├── events.rst
├── examples.rst
├── index.rst
├── installation.rst
├── licence.rst
├── limitations.rst
└── support.rst
├── examples
├── dump_events.py
├── logstash
│ ├── logstash-simple.conf
│ └── mysql_to_logstash.py
├── mariadb_gtid
│ ├── docker-compose.yml
│ ├── queries.sql
│ └── read_event.py
└── redis_cache.py
├── getting-started.md
├── logo.svg
├── pymysqlreplication
├── __init__.py
├── binlogstream.py
├── bitmap.py
├── column.py
├── constants
│ ├── BINLOG.py
│ ├── CHARSET.py
│ ├── FIELD_TYPE.py
│ ├── NONE_SOURCE.py
│ ├── STATUS_VAR_KEY.py
│ └── __init__.py
├── event.py
├── exceptions.py
├── gtid.py
├── packet.py
├── row_event.py
├── table.py
├── tests
│ ├── __init__.py
│ ├── base.py
│ ├── benchmark.py
│ ├── binlogfilereader.py
│ ├── config.json
│ ├── conftest.py
│ ├── test_abnormal.py
│ ├── test_basic.py
│ ├── test_data_objects.py
│ └── test_data_type.py
└── util
│ ├── __init__.py
│ └── bytes.py
├── pyproject.toml
├── scripts
├── install_mysql.sh
└── lint.sh
├── setup.py
└── test.Dockerfile
/.github/workflows/coverage.yml:
--------------------------------------------------------------------------------
1 | name: Python Coverage
2 |
3 | on: [push, pull_request]
4 |
5 | jobs:
6 | build:
7 |
8 | runs-on: ubuntu-latest
9 |
10 | steps:
11 | - name: Check out code
12 | uses: actions/checkout@v4
13 |
14 | - name: Set up Python
15 | uses: actions/setup-python@v4
16 | with:
17 | python-version: '3.x'
18 |
19 | - name: Install dependencies
20 | run: |
21 | python -m pip install --upgrade pip
22 | pip install . coverage smokeshow
23 |
24 | - name: Run coverage
25 | run: |
26 | coverage run -m unittest discover
27 | coverage html
28 |
29 | - run: smokeshow upload htmlcov
30 | env:
31 | SMOKESHOW_GITHUB_STATUS_DESCRIPTION: CLI Coverage {coverage-percentage}
32 | SMOKESHOW_GITHUB_COVERAGE_THRESHOLD: 50
33 | SMOKESHOW_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
34 | SMOKESHOW_GITHUB_PR_HEAD_SHA: ${{ github.event.pull_request.head.sha }}
35 |
--------------------------------------------------------------------------------
/.github/workflows/lint.yml:
--------------------------------------------------------------------------------
1 | name: Lint
2 | on: [push, pull_request]
3 |
4 | jobs:
5 | lint:
6 | runs-on: ubuntu-latest
7 | steps:
8 | - name: Checkout repository
9 | uses: actions/checkout@v3
10 |
11 | - name: Set up Python
12 | uses: actions/setup-python@v4
13 | with:
14 | python-version: '3.x' # This will install the latest version of Python 3
15 |
16 | - name: Install dependencies
17 | run: |
18 | python -m pip install --upgrade pip
19 | pip install ruff black
20 |
21 | - name: Run lint script
22 | run: bash scripts/lint.sh
23 |
--------------------------------------------------------------------------------
/.github/workflows/pytest.yml:
--------------------------------------------------------------------------------
1 | name: PyTest
2 | on: [push, pull_request]
3 | env:
4 | PYTEST_SKIP_OPTION: "not test_no_trailing_rotate_event and not test_end_log_pos and not test_query_event_latin1"
5 | jobs:
6 | test:
7 | strategy:
8 | fail-fast: false
9 | matrix:
10 | include:
11 | - {name: 'CPython 3.7', python: '3.7'}
12 | - {name: 'CPython 3.11', python: '3.11'}
13 | - {name: 'Pypy 3.7', python: 'pypy-3.7'}
14 | - {name: 'Pypy 3.9', python: 'pypy-3.9'}
15 | name: ${{ matrix.name }}
16 | runs-on: ubuntu-latest
17 | timeout-minutes: 3
18 |
19 | steps:
20 | - name: Check out code
21 | uses: actions/checkout@v4
22 |
23 | - name: Setup Python
24 | uses: actions/setup-python@v4
25 | with:
26 | python-version: ${{ matrix.python }}
27 |
28 | - name: Run database server in docker
29 | run: |
30 | docker compose create
31 | docker compose start
32 | echo "wait mysql server"
33 |
34 | while :
35 | do
36 | if mysql -h 127.0.0.1 --user=root --execute "SELECT version();" >/dev/null 2>&1 && mysql -h 127.0.0.1 --port=3307 --user=root --execute "SELECT version();" >/dev/null 2>&1; then
37 | break
38 | fi
39 | sleep 1
40 | done
41 |
42 | echo "run pytest"
43 |
44 | - name: Install dependencies
45 | run: |
46 | pip install .
47 | pip install pytest
48 |
49 | - name: Run tests for mysql-5
50 | working-directory: pymysqlreplication/tests
51 | run: pytest -k "$PYTEST_SKIP_OPTION" --db=mysql-5
52 |
53 | - name: Run tests for mysql-5-ctl
54 | working-directory: pymysqlreplication/tests
55 | run: pytest -k "$PYTEST_SKIP_OPTION" --db=mysql-5-ctl
56 |
57 | - name: Run tests for mysql-8
58 | working-directory: pymysqlreplication/tests
59 | run: pytest -k "$PYTEST_SKIP_OPTION" --db=mysql-8
60 |
61 | - name: Run tests for mariadb-10
62 | working-directory: pymysqlreplication/tests
63 | run: pytest -k "$PYTEST_SKIP_OPTION" -m mariadb --db=mariadb-10
64 |
--------------------------------------------------------------------------------
/.github/workflows/release.yml:
--------------------------------------------------------------------------------
1 | name: Publish Python 🐍 distributions 📦 to PyPI and TestPyPI
2 |
3 | on: push
4 |
5 | jobs:
6 | build-n-publish:
7 | name: Build and publish Python 🐍 distributions 📦 to PyPI and TestPyPI
8 | runs-on: ubuntu-latest
9 | steps:
10 | - uses: actions/checkout@v3
11 | - name: Set up Python
12 | uses: actions/setup-python@v4
13 | with:
14 | python-version: "3.x"
15 | - name: Install pypa/build
16 | run: >-
17 | python3 -m
18 | pip install
19 | build
20 | --user
21 | - name: Build a source tarball
22 | run: >-
23 | python3 -m
24 | build
25 | --sdist
26 | --outdir dist/
27 | - name: Publish distribution 📦 to PyPI
28 | if: startsWith(github.ref, 'refs/tags')
29 | uses: pypa/gh-action-pypi-publish@release/v1
30 | with:
31 | password: ${{ secrets.PYPI_API_TOKEN }}
32 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.py[co]
2 |
3 | # Packages
4 | *.egg
5 | *.egg-info
6 | dist
7 | build
8 | eggs
9 | parts
10 | bin
11 | var
12 | sdist
13 | develop-eggs
14 | .installed.cfg
15 |
16 | # Installer logs
17 | pip-log.txt
18 |
19 | # Unit test / coverage reports
20 | .coverage
21 | .tox
22 |
23 | # Translations
24 | *.mo
25 |
26 | # Mr Developer
27 | .mr.developer.cfg
28 |
29 | # Doc
30 | _build
31 |
32 | # Text Editor Backupfile
33 | *~
34 |
35 | # Intellij IDE
36 | .idea
37 | *.xml
38 | *.iml
39 |
40 | # Nose
41 | .noseids
42 |
43 | # Pyenv
44 | .python-version
45 | MANIFEST
46 |
--------------------------------------------------------------------------------
/.mariadb/my.cnf:
--------------------------------------------------------------------------------
1 | [client-server]
2 | # Port or socket location where to connect
3 | # port = 3306
4 | socket = /run/mysqld/mysqld.sock
5 |
6 | # Import all .cnf files from configuration directory
7 |
8 | !includedir /etc/mysql/mariadb.conf.d/
9 | !includedir /etc/mysql/conf.d/
10 |
11 |
12 | [mariadb]
13 | plugin_load_add = file_key_management
14 | # Key files that are not encrypted
15 | loose_file_key_management_filename = /opt/key_file/no_encryption_key.key
16 |
17 | # Encrypted key file
18 | # loose_file_key_management_filename=/opt/key_file/keyfile.enc
19 | # loose_file_key_management_filekey=FILE:/opt/key_file/no_encryption_key.key
20 | # file_key_management_encryption_algorithm=aes_ctr
21 |
22 | # Set encrypt_binlog
23 | encrypt_binlog=ON
24 |
--------------------------------------------------------------------------------
/.mariadb/no_encryption_key.key:
--------------------------------------------------------------------------------
1 | 1;dda0ccb18a28b0b4c2448b5f0217a134
2 |
--------------------------------------------------------------------------------
/.mysql/dev.mysql.com.gpg.key:
--------------------------------------------------------------------------------
1 | -----BEGIN PGP PUBLIC KEY BLOCK-----
2 |
3 | mQGiBD4+owwRBAC14GIfUfCyEDSIePvEW3SAFUdJBtoQHH/nJKZyQT7h9bPlUWC3
4 | RODjQReyCITRrdwyrKUGku2FmeVGwn2u2WmDMNABLnpprWPkBdCk96+OmSLN9brZ
5 | fw2vOUgCmYv2hW0hyDHuvYlQA/BThQoADgj8AW6/0Lo7V1W9/8VuHP0gQwCgvzV3
6 | BqOxRznNCRCRxAuAuVztHRcEAJooQK1+iSiunZMYD1WufeXfshc57S/+yeJkegNW
7 | hxwR9pRWVArNYJdDRT+rf2RUe3vpquKNQU/hnEIUHJRQqYHo8gTxvxXNQc7fJYLV
8 | K2HtkrPbP72vwsEKMYhhr0eKCbtLGfls9krjJ6sBgACyP/Vb7hiPwxh6rDZ7ITnE
9 | kYpXBACmWpP8NJTkamEnPCia2ZoOHODANwpUkP43I7jsDmgtobZX9qnrAXw+uNDI
10 | QJEXM6FSbi0LLtZciNlYsafwAPEOMDKpMqAK6IyisNtPvaLd8lH0bPAnWqcyefep
11 | rv0sxxqUEMcM3o7wwgfN83POkDasDbs3pjwPhxvhz6//62zQJ7Q2TXlTUUwgUmVs
12 | ZWFzZSBFbmdpbmVlcmluZyA8bXlzcWwtYnVpbGRAb3NzLm9yYWNsZS5jb20+iGYE
13 | ExECACYCGyMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAUCTnc+KgUJE/sCFQAKCRCM
14 | cY07UHLh9SbMAJ4l1+qBz2BZNSGCZwwA6YbhGPC7FwCgp8z5TzIw4YQuL5NGJ/sy
15 | 0oSazqmIZgQTEQIAJgUCTnc9dgIbIwUJEPPzpwYLCQgHAwIEFQIIAwQWAgMBAh4B
16 | AheAAAoJEIxxjTtQcuH1Ut4AoIKjhdf70899d+7JFq3LD7zeeyI0AJ9Z+YyE1HZS
17 | nzYi73brScilbIV6sYhpBBMRAgApAhsjBgsJCAcDAgQVAggDBBYCAwECHgECF4AC
18 | GQEFAlGUkToFCRU3IaoACgkQjHGNO1By4fWLQACfV6wP8ppZqMz2Z/gPZbPP7sDH
19 | E7EAn2kDDatXTZIR9pMgcnN0cff1tsX6iGkEExECACkCGyMGCwkIBwMCBBUCCAME
20 | FgIDAQIeAQIXgAIZAQUCUwHUZgUJGmbLywAKCRCMcY07UHLh9V+DAKCjS1gGwgVI
21 | /eut+5L+l2v3ybl+ZgCcD7ZoA341HtoroV3U6xRD09fUgeqIbAQTEQIALAIbIwIe
22 | AQIXgAIZAQYLCQgHAwIGFQoJCAIDBRYCAwEABQJYpXsIBQkeKT7NAAoJEIxxjTtQ
23 | cuH1wrMAnRGuZVbriMR077KTGAVhJF2uKJiPAJ9rCpXYFve2IdxST2i7w8nygefV
24 | a4hsBBMRAgAsAhsjAh4BAheAAhkBBgsJCAcDAgYVCgkIAgMFFgIDAQAFAlinBSAF
25 | CR4qyRQACgkQjHGNO1By4fVXBQCeOqVMlXfAWdq+QqaTAtbZskN3HkYAn1T8LlbI
26 | ktFREeVlKrQEA7fg6HrQiQEcBBABAgAGBQJQEuvlAAoJEPGiAvhqFx4r61sH/3bS
27 | 5P8fjQtlTA5YPrznjNKoBtSJYV2X4jbBIrL7xms+JvH0hURnvtW773w6CkYcYhl1
28 | OPEbrI4sc7wW+ikzLmOiaTlX8/Q49x/bKWK4h2vouq2Mkl2SKToXy4jJ08uzR9jr
29 | 2Asjc0kv3IiFAAiHx/9jR/MzU+QjRdbjzUbOx7B888+6TpU47U1oheHKvyI+megj
30 | a5nY/kojL42DfburHRChraDbacnIA+RxikfiOzXf+7+esoWlHuBabr7DV4oPOivb
31 | SOjFBUvAlMsux495FQWTlFlQTNOd5JxnQC0soEK+dAwN4zBilZeGZx43tkVVkZhU
32 | 6+WFqEUzMVEkDzC6QTSJARwEEAECAAYFAlNQfr0ACgkQKIW3A9M3HPHVsAgAll11
33 | g1yHAFkVMPo96YfHa/bt0iLZY598AXO3JaXJSlj7i708+5RoM5VQdLPIR+MYJEgP
34 | sy85eruepqVM7JBZe39SNwHPRhqTONDOb5pkfYcJQ9R3WbRn2w/sJI5aoIrTS6EX
35 | BnUX//lO8dPRoUkuwX99/bLpyF+rDIF0guC99g98w4xeYnBnW9JI/t5Qq0ZqfOd3
36 | RsgN33/clIgZMXCjWsKYu1c9w9nXVKThdwT/vDSj4OD9vrKyoJBW3eB4nXCEkArd
37 | 62OL2k5BJCyoNJzQOlOK9GIDmu8CE0rMRZZ7TDM2kYOd0LStPmJB9CrmUdxmPAe6
38 | YvrZYMnMlUe5iBaO64kBHAQQAQgABgUCVoFXdAAKCRBGbuG/4JOFCpPTB/0Z7exH
39 | HD40iQuQqwdgFOS2Ng7K/j3jyNrmz1rBHRvNqko4xqW4SB/C0oPz80KBh4hVjJpm
40 | ViZLUU/nXIC27N29lcAjQMAhPIzh3VJEIRKMIKJnTbx4gRyh4z+P5RevCjj4jv9p
41 | sWGJJqTOwVn95AeewyeQk4t/yS6De8xeNKiLP6dPXj758EraYJW27VJ+1zzvtfxH
42 | vFR84pgAaJudj5ECLsTYlVMy1Z920lqq3eXnLqi5Oss4z13dl4Qx1gvU8KZevBnj
43 | 67uhM0LbrItJukv7B4BF6ofyMaxWVSZZYxe+Sb6LGauc3rpa9IupDoo/mNaejBX3
44 | 4oSE2dYE5pPgi7ytiQEiBBABAgAMBQJOd0EuBQMAEnUAAAoJEJcQuJvKV618CfEH
45 | /23v1DvZqn6HNrDMocDKTeP33QVsHy5ANdhV0AzkDL8B/R7JhI531IGCl7HIWUYv
46 | g7h13tT+5fBtmkC7m4QbEH/Xoyr0RU87ljEJHSKvuiqLpcb+qH5AnL93dcbTtlu5
47 | 2+csXKVQT60XxhRnmyb2WufA8pgjYMhrQFOgDs+L3mrzZiNvhiA0LGOuvrWA72fg
48 | scM3WLvhw1fR5qyX3OnXjNJpwZ+0kY6s8ST4KE0IYoU2r+qv0ef9qeTb0Px/ODoE
49 | uho6LHxnNnblA8wj/5IXjESn87sigjh0D7SbiI/PvoH6R7RFOyA1F9UqN3PZ9D3a
50 | Xgb3JbA26UZwjcUFlOJgaLeJASIEEAECAAwFAk6IthkFAwASdQAACgkQlxC4m8pX
51 | rXy2QAf/aEasfjQvfFEA/8JcQkcrc74vzLj524EFDyxxGqddLbxIt90vx/8Q0f7X
52 | BqH2OHIwL6ObJGV13lqvdwL+zwAlG85INF9Hkq9qC+sMuusX6L+9gMErl/olCuvK
53 | rSmi6kS4rTNNsvGjUVf4ICr7e1DLxpjr7Oci2mJaG8rxmhQtgpX5DTrdjJVZ0GQp
54 | p2AQsDpLTxpBMYrtsmTIn3GBsUHKRylRRufSnhdnDNneMWDPByapEhhlt9OixVUs
55 | nkcBvsG7bYC6Q+WzP09m93xfyd+a2gkjC2Xmq0U6vGsoD5flzOzDNnkfeOmsOo5j
56 | rIDQRK277yHUI+i7nQGY/vIpnF1diokBIgQQAQIADAUCTpnZ0QUDABJ1AAAKCRCX
57 | ELibyletfALyCACBBdWSfkcu1jnSheaHkP8p2mFIoMzfdn1v/cwfMrkJZ4JBSRon
58 | WDG3ItbUdbIs4iFSRbrEd2aJ6gDxiv0lLWvWOili4ZnO3OVJxB5JavzX3TGdllUi
59 | MpF5XLasNLG2/s9XnBHeBvLvY+YpZTDtQd+q0GsmAD6Hly1/HWOFchS1RmkNNK1k
60 | AWBWa6cNegE1m1hiT999jUnwpOM2aupFxFtO6tLLd2AYAABnTUMhHmQmuHeKQUUl
61 | +tfPSo4Ohg5yHGojhehzujy6/X0ZS6+TmwoFPhvYBIX4lcAxFfRPu/watqWGfcIY
62 | ZAalkM3aV0zUzNuLmhIRNNRDzuc3gjtteKupiQEiBBABAgAMBQJOq6YBBQMAEnUA
63 | AAoJEJcQuJvKV618wIcH/3/Tvfc9et2ORWVSMXscql0PzHLfbih7lxFZ0RWGwOPX
64 | UCw+Eu9zfsze4J/YxXkHAaOMPKlVFfeFP7Wv1Wy5HbTURMqZTzOF2PGBOVn3fW38
65 | oN6tT2xIlY5PfebARftvk6PfWGCrLYFz6o5R9I1HxMg5nsTo/obKfEXOt5L5CMd+
66 | lQWuDjUWBXnxgISgzIoC9mAIXNVJBrZBk7i5pQhsCFepb4g2Q90SX+d++VRZ2h7+
67 | 3l38EMQsBPgcRtwlEa438Y2sTE5jD8gBtq4Q6eaE1BL6g2DW5KOVMWDpq0o7xeBc
68 | 18KnqxJVyVJFM0odKae0cRwf9ZQsQ7Ow9RdYZYBULjSJASIEEAECAAwFAk682RwF
69 | AwASdQAACgkQlxC4m8pXrXzFKQf+LNO3URFCyAZwNQ+qq93x7ECTWamwLsg5b4Bh
70 | i1s6MhA7PtSTBRZWmvhHiyKNd/OhlNg2ppgwtt6dyAe/bkISXg9mdU3FYOHAOuEn
71 | xfwh3GuXKHkUq3QIrnc1P3ICxrVyD/zmhWpl4HTMQIZaCxW0hzwmZMm9NKYVWvPs
72 | XiFQ6ytRwcmbDSF8v0kivFNuktj1WdqNeH1rymydQIGvDeg7lmPw/D9pot0QG4rm
73 | YH85Jii+YnR/8Ein5xSnU4Cc7LHQnNY2RBlZOaxWHFjqyQER3j/7bJ2/1MSgRLDb
74 | Tu3tzdnv6y1ZxMpFSDtOuoc+8WSv1TQUqcd5N6o+FzyuNSB8tokBIgQQAQIADAUC
75 | Ts6jvAUDABJ1AAAKCRCXELibyletfNrcB/9CM41y8TeOZ7yHxIwu+WvovWUi5akf
76 | JbBy+BqREnBSiNGtJU+CFQCGNkShvjALNnh5tFHyrsBrOY5bkWfUaiq3OtbGNF0F
77 | ff+852PQ29eoIV9OCY5JzirP3z4vO67ypAf0MSVkvKm2/3q0ZlvnDzY20cUxT0uS
78 | SMBVhwNeonhRKb3QIClep37yVU6ta4zw3JyAPQiTdkplBcfUMd0kXOiE22ptYYxZ
79 | 2OrAoegFIcSyNn9ZqtcR/T6hMuNlJuvKa0ak8ieyKMdYP1Aw5HnlHAIO09b4k8+7
80 | l6Ut1q6uM86OMjqWiXyw0d6SOYNmZEjRkzKOmskhODgAbfItT3NT5ugJiQEiBBAB
81 | AgAMBQJO8jxWBQMAEnUAAAoJEJcQuJvKV61814gH/Ra13tJYHDzPrWDljC4b+YFy
82 | YtlVAWi8ackV5v5CXJsSJyqtRgnd8g+PxSxzbt2XAs/7pJTwG2V1iNDunsKtC1rE
83 | bZJZiY1onnR/oRsmk7/eVZJaI0SAXfJwWiejPfK5YzAR0xcFr40BVX+BS0SUGah5
84 | b43ApKtg3fQViaRl76/4KSpJjCWDSv37X2UXAoW9+TSNfVkaToY/bvNPDj3Kilb2
85 | QXD8BRleXBAc5gAveCeyXA/PkvCJPIlCTEBhi90HJmTgaM6L4fOoY+yk0hAR5S15
86 | fN9s/PR9YS3ri2516Bi7983lBUsZL9Yd+S4WS6iDA0EVy7lM8RhzAaS4T8hlkSeJ
87 | ASIEEAECAAwFAk8ECRcFAwASdQAACgkQlxC4m8pXrXwAbwf/QsuvXIzqixssCE7N
88 | wqYP/+NI5nYUgFcYpSYrz+ieW7mxRwyZVU8eeNQ7+YeIUxtgULrnkOKtVQqUhlvq
89 | x3HYT8r5mRkc9a8XUEBodQ79AaHdnNzyVUmD0q0UsAmSZCRzrVUQXYyuJpGW+WhU
90 | 82vk6K/ZMc4+BXF1XeCgEvoR0B2B3AK/Lbdnji86nRBU1C3hokqDZ/j1c783X4Z3
91 | aYSvvBdo30NlCrnNNTu5NGJ/cWVcdDhunxNYErcy7+wuWpli0XfPsYfjDdBvDIDY
92 | JaQaBMbBDDJTwWElB2upcaXTDTLom6yMlZ3BPV/EDoSJrfCtLDZ/xHW7pXSTRmGw
93 | y7ezuIkBIgQQAQIADAUCTxXU3AUDABJ1AAAKCRCXELibyletfOKMB/sGf7i6Xs15
94 | 0sYoHh9bIoO9kBFzKzXeQ1RwiXYoN26PiuWJpje9dP5uc/ut1ylBFqXev5J2ozm4
95 | HJE4c1n7TXitUkhUBj5qGGT/RGZ0lOy/v3UlKRMW4/ONhuS2GoKwsjBrZcUcsFhJ
96 | m4u9kykvyTitIy4jnb1cvjx62UrtA01UO3CYdQZj7G8+888Y5nCOZfuweyu0R2R6
97 | uOZwy6CGm8A2nI+wV7wStAI6s+EMiRkFI1wu6vvPCPqbT7zl5ROZyPzW4giTjCOn
98 | f21jdSsGKwvNH2bhWs530BXD8lGg8MkWrp/it5BI4tsF3nKvsjJi1BKdNUyrFHji
99 | +dBw+oRx9O1viQEiBBABAgAMBQJPOW0LBQMAEnUAAAoJEJcQuJvKV618Cj0IAJZG
100 | 1LE6+55KvoWjpNIyqWh1hG0TBBjKwSNxexPkQzulf/MuCbZ9OBGA3PBoGTAWofpj
101 | oHiDat8vI0TGFeY1bWV79jFmBVHIzJFQSnqoYv6RhI5lHZnbdw5T792fnzEzEZgK
102 | HCPfNYmP4KPD1T4P/p60enBStscZzkfE2nIpCQRXkn5JDkYNbQ8442L9QeWSBJKa
103 | ZXqZeWoSQQau3KWIU3WOmJNEs9CfMBHX3y0mjca0bli60YVs2VO6fWxc32/+zh3N
104 | tXSZwe8Cr+Vb7YBPo4kMPbTimn+YXEoEWLheoXrvgduYHOVLvPJaONPZme6MUOoQ
105 | wp+yR784dK4ZkjfioneJASIEEAECAAwFAk9KkK0FAwASdQAACgkQlxC4m8pXrXwv
106 | VAf+KNoICWDNAi8RKT9BlPFCjod/v85tQze0HuJGNogrRdaZSh0Gk4LeRZYEQhWo
107 | l3Fb+1sxs/xZ5saJ0dZXi5fpH0ZcMjz56CPwFjwsvzTuApeSp47EprHQ1KTm8GeC
108 | mLHYpWUe1YibtrMHQCIJ1wVMK5USUewiuEwCq03qJ0YFUq1cfFbf//P3GI+9wi6v
109 | KGPyitIh4kK0sKOOLsoXYhSALLI8l5+v9hPsNM4Lz3sTUjTUTh0PKY26vjU2OhsC
110 | iBrGno91E0cEO6bLyl8/Mu8Xc5jgK/5IRr8QeYI5MdNqYpo3omvL+umh+q256IyT
111 | AKLzVEuBB3R57pb+K6LwvCn/YokBIgQQAQIADAUCT1XbGgUDABJ1AAAKCRCXELib
112 | yletfL1RCACxVzwc3ADtUp0GqwS88mDFWWbpj70lyp1BwDvupIwKVzyfqhUXVGkj
113 | bIWcVSmPWa8m5buKJf3y1E7dApYkgyMQ+i+BlxJuFWGApeJTzt21bCZxiEyxcEBh
114 | zNuxJo1aE4g/Aoe5D6thF693TEYM46Xc/umhxklHznS0z17nw71vN2xFhmkVbkyS
115 | sPsCK3LkakPrnFiYCRsHBOM3h42WnScdV1JtV/gWyO97okLrLMo4YUFVTVO0BHa4
116 | 8rrHGmfx9/MI1MDW8pcQzweCPKrgtZJ0NMEh6+4CqTPhPZ4YkteKaEalNfLu/P7C
117 | djFD150AOud9yAtt/Pyz/CYKgfgYkAU0iQEiBBABAgAMBQJPZ4N3BQMAEnUAAAoJ
118 | EJcQuJvKV618UEQH/1GZZJKRSY9aJvYSEc+4Evpe2L02iks6aPkwXMdwu2qiiaYq
119 | +k5TCvXG/Rxkx6Nl6dTdxbUC+9jbzugnCaQVEW70sm+ItIa5v2HG1AuGN3mo1oZE
120 | Ut4lXxjRW3P6x7YVYs9K+mOZYV19emlZA7+QiBVmhdwYlCtBZx62YsuBT2nFFwsB
121 | I797n/DKcVeaBJqPFbNYeEy96G8lM6qyJhaxDGBP9dpZBPIkeSlgmhfHhcpeehy9
122 | zz5fdCTEAV84cLBaFgj7sI3zOcOrFi5f/88p8MbZSfQkc/XNryumifA0btznwSjd
123 | YohHDRMp5LA5zVxK5+cVys96eaWHanvQS81EAg2JASIEEAECAAwFAk94p0YFAwAS
124 | dQAACgkQlxC4m8pXrXxjCggAyfe6pIHgZ7UPrWbSkhcl85wLoGEJm8yZXfFOnhkk
125 | J0JDONFXetlGS3QPDI5VikvMuAOC9tHCNeM7ObShvUO9gcUDsOcZMZSwEsNf/Fnm
126 | BG8O2QPrDxv/6vb95CJK7t7L+uJjAHp2bcbMpHsURsgrpsS485QHC9Xa1iRlGBNi
127 | pers0Eu42/tzzQZaEL8+p292EWfgoC8vQ+NUiCvn6Z1fK+Iqobu6+C5gEO0NQxfW
128 | qBmmL59vsSxslh8SQFHreMZDDTi6epPURdZ2zXuCP7KnggduQmYQkxLx3Y0hkw9s
129 | iUeY36FpMDvcPA2RtUPVq1qpIuIUgZdHJnLzt+KQtUiWM4kBIgQQAQIADAUCT4p0
130 | BQUDABJ1AAAKCRCXELibyletfAhRCACaeQW3Fe/iQTEUr5KFHAFg6CUj9roPcxsO
131 | RQfCfdDw3nylgirOU42sn9k7zOZ7auQxPUhml25aCMGu+BaGZYKpYTGeALWPQa/q
132 | MjyeUvOOMKQJljBXOjZxDxWh0JEPm3SWKUVTXfZc2oCT9utT90vkvU9lHh1XTiQY
133 | FiKvTG7OJZ2c1rJ2X0OC7M9Gj6nuPLZFPIcPp35W8vfeKiMDTpZP7viP5SWzc+cw
134 | 2PdaLHF00Zdo1dagYCmMMAl8cgd4/X8laUZfu0J6m01nAiAlRS/rrmKT9B43aCja
135 | ocxILgFEECzgfZrD0132toRvt9mWIOacyOVexgh6tCjuruQrY90xiQEiBBABAgAM
136 | BQJPnEBBBQMAEnUAAAoJEJcQuJvKV6184uwIAMfMCczXQ+1JYHSpqwCv/2Kh+2q0
137 | rLG9913+ObFRw6VFQOOeA13ga2aYvsxLqNN6OQYQLn9/HMH25FeozdZgtjdBWX6c
138 | uL+0cRyi2K+7a+q/g0Z9S9YIl+cm8QoGNHywMZMLHFlChigAVsZM/W/23hF2jtqz
139 | LfuwwVHaQDj+RTUUPFi0VgXDBkImk0uGHDQwIjZK0j/AjC0D/tOIUI2occAoAZ5L
140 | MpXIqs92OfzK3smzZ0zq3Y1Q7BdDnrdLizoll1GVdRX2hiHW/6AUC4PntQe+Kefq
141 | t2hfGCPe6gGaT2taokM1iA1/C1/UFxBcmd5r6sxq8ZBaKHF5XjT4Fk/H0bqJASIE
142 | EAECAAwFAk+uDf0FAwASdQAACgkQlxC4m8pXrXwzhggAin3I0JUsa1pRFK2GGaaA
143 | T+wxPmHZSBrpreZerwZsZrf+ThIaq/nVsLqIsgubZvQryW7mbqfGp1nxc00yZj0T
144 | 7JhOM+xJPtwjTFTNsCMp7CzrdaURSuP+UTY0S28COhLaAkH6S3GSf/0geerCuA6B
145 | Sh9vDpOHKZREj56DKgN0Gn3UhAPxS6r2Vf4j4LU8+740JDIK1wCFDOH7ynt4l5AB
146 | MRl7ZoHVx1RaCph68Sdg00rzO/9fc4xiXrtjISCh8qhJgpOoS48cghOaQB+fSpWw
147 | EDj9W0tvMZUbJAQKZtsDfizmVswuth4REXkOdIpwhrrrmytD6lEJ8mHPpVBcaxaX
148 | JYkBIgQQAQIADAUCT7/ZVQUDABJ1AAAKCRCXELibyletfAM4B/4kRRQouKo76Ko4
149 | tX4u8jT2VTjDDth9dyrPgH3I0bR7lWmt9LudDQ+0x/l0Husnq02qS4d2gJv7DT/f
150 | Wk8SLsEUpcihKA7Pd/l1UoEajwYGxf78b9MpKJN6ei/Q8RTv8OXC/CawYTGaCaxJ
151 | CARZMDfLVpXFz0RUL3HQWL9zNw02VKmPlB5zsR70s7JfiydEvV3HfQuW72H2m5mC
152 | bxuXkUt6Y9S/8t2ussOob2PibZ0AJLdOU3XhzCIEB/82/FfcMQ85hNLp/C8qAtic
153 | eU6YpELCUno8Cum93KMVQZZ4JLuvgqgwIpjm2S8Oh/cWGCCtBaCbO4+W0GRMFDuz
154 | iwYI3DDqiQEiBBABAgAMBQJP0aXPBQMAEnUAAAoJEJcQuJvKV618OPUH/03TW7p9
155 | obUPnEVmq7MzsfASxIlv8ej1clEKWZ1f9Kr4Ss5SVIzPYUD3ANALDo4arQjegsvC
156 | l2yv07z5vQ/zZOh1sA9T6Js6yVRZNssBhSLk0fpZbikqe5s5ucptMCbOXD6u99SJ
157 | t9UVEFPJ3Yy66vWDn+RVS/DapnDgFOBPHevAW5lQkAuTDK9PdNRSsCs50oX6cu1W
158 | DdufwQEo/HYZ8pwWEBK6k6DiQegp3SYXU5W30eEoq6gGGWYN13qhoQcsrRBxNcex
159 | TFlhWIXTbbJwFYWAW1qgMwbPJRfSXTIWn9FUFP+M9dIFa0bVy0f4sawovj+HDP3z
160 | kJKneqcKlpGl7eqJASIEEAECAAwFAk/jcSAFAwASdQAACgkQlxC4m8pXrXxQQQgA
161 | uvtE1CQ3DWqjsThtSlYmOimIPTQi/KcDSvkSjSvzdVflhiUB+6gzMEaDp98qIKx2
162 | seMSHjloiC67xytDuna0tQ47MpiIUUptl2xP/KWJhG7ifbx4d2/xUwoZkgDu3Q40
163 | inVg93b2mPJUrLgw6j3U1/Bczn61wjNtgv2D4O6FBtgObLxNW/sorJj+CplTVgHq
164 | RLj2XJxDDxYKKd4Wh4PV9vpo/27QwWK+qfZAICGNe54oLTUzY7SNHTC7uN2iM3nQ
165 | mB6jpTR4gdOPY5CbOeHzBgYxfaj7XJtJOOMcindfyV1jHTVJrcCrAoQFyUt2DnIj
166 | ICMeramFE53az+COqta364kBIgQQAQIADAUCT/U9VQUDABJ1AAAKCRCXELibylet
167 | fCfcCACNZSHfUjGxBxejyl0XteeJRKWqeYpjFP5Rk4/vWXF5pX/KNj6DDJm9/xNZ
168 | PhGVGnkgaDpo0vAtoMHeVxcEa5cAA0ZSv6RuV7C+IecJjdn49aK6K6yV76h+Nuzy
169 | 0UCvcs3cBuPvR0wTsY6EvKmU/aZcigCWcwMnQDKsrHkR4DaNLzMJYK7OKI6PhnBR
170 | kB6xdyGlI71X1DzpiI+2n6KRXSGZiBmkhdE1wc6nGdXIRb/3kwXCLiCkb5Wg3nuR
171 | YFe/pyx9pyjgEQi71f/vLg4ts26/NyKecdrQ917lGdGVsQ9AEFnKy3Zhm+JZMWxs
172 | bwrbGiR2i11kNpu+tEYyFHMFTBASiQEiBBABAgAMBQJQBmFrBQMAEnUAAAoJEJcQ
173 | uJvKV618MlQIAL7rxJesrhpJ4ljpb7hxLrFL//thggYOqlfiBKVbW0wgoIhY2EeR
174 | mrEqdYKWea+AaHDpWxnY3SRh06civMQ9YvuqmVlGITUYNjzl1Fc3DRJdoYaLsDyz
175 | on3Jk8AClO+kxL9y+Uk74i94VoFEdshFGd1LdCBlVerjxEfY6Ud6nejtz3rhZMH/
176 | jWhk8nuI4qCwMt2mMLWQlF872JjSz/dCuMvucmrmbXi5jXqtupoigvzULuLJzRQp
177 | T2xkYxw9XsfSg4gDLbToDeFhKu7K062ILU+d9VmOHXh6TYKD5Rz7gHwtB/kumFrj
178 | G4H3G+nVfka3fCI+szNwfz3I5rGFHzj3T7SJASIEEAECAAwFAlAYLX0FAwASdQAA
179 | CgkQlxC4m8pXrXwJtwgAkJcNtS33oHfEZiCE7H5xRv7HzNzL2XI1XxALbGN30yvT
180 | YslNAd8l9D1N3ot+6hFvudAc6okrX+VYTby322Ufsj6Lu6NjAD99Zqt1HOoK6U1e
181 | uAUSJApkVoONFjIzSNYb+wDf9GrYIwE2EF4JxMO5nxW9F2bTEw+nJq8/wPZ0z1YG
182 | Q8c6KTjIAggtWSuFaavRSnQmFGh6V97Kpw3/oKVSB5EwjIFi2EDnl+TZfn4J6uia
183 | aWkN8kmUVA6/rhVLNLrw0byP3J/rq64vdsHQ53M4xbpir8/3CyQIOUxtib9Scd8H
184 | nAC7FakN4C+3+769TjGqoSORRAanTdvS/XGXv2ZpK4kBIgQSAQoADAUCVaDccgWD
185 | B4YfgAAKCRBKM+zVJsj8Sw9RB/9noe2uPvANZTy8ti/cXDbdm4ny3xT9qRI3Burp
186 | QaDCqR77kwoLV+mT3R6TO7lDo3vxdQgjdCDwed91NTSKFiCp3cDVCY7oIbaETPnG
187 | jFHWMnOJEJtvUnuoTDw38rMFxOQcHj5qDpuMVj1Th/3FTdOM8i7sXZUGTUBf5yYO
188 | jzLM+MOc/iujRhptuDRD3HO9or/ukVHP68v+7+XFbuITufq9dOJpjVeci3nEBdd3
189 | B8tiZZ4CB1z0pcU2W3iV+6qsRO00IeZ74kj4HZPPaAeYxUseINXn14sLS5T+5Ww5
190 | cR3bqJskLh/cqpj2TG5CbfbmQ3PGczGLAw8JXOwSHuWxin46iQIcBBABAgAGBQJU
191 | mpxSAAoJEHcx6lOdpXKi3b4P/3ABD6nTeVUylhKAxYw0uCOrfPtCs3Q1g2nFRcCi
192 | fEQKNTNkpJQ8ZyL588rGXdlR6eT6u4uC2AmaOZQ5Hq7UOi8GkMXOF+psimOC0wOu
193 | djmxarEn4IfZFvmiQQQMnmoT0PAgw/mJdt/jGOcE7I+Kreyn3zc8u/Ly22J1Z70U
194 | N/wm6FYsdEkc4NCNzou59wdJYob18uYKMvYpI82Xr3ozBH433kQoKK8svY+Ubxx/
195 | Txd3yVbVLRwkuH/pMS5m0CdV6jQ1Q4ZRc48/KFM8//GF8t4puTGO2bquD+MFoiO6
196 | 5CBTI02Eqc/3r8ZFZZEe+Kjp4E9qWFRy8iZKsIeeotGl3IzGYrrtnMplLdiwfSLG
197 | jMlgTSY169naERjAH47keWWorbZjC8mcIsfZllZO7ZMJSMZk/yFujadKjIQ2XFmK
198 | j0aX+/jKmN2WDCuOedm/GgzI4V65zjsC7M/lCOG2Wta/XYLiFiLdgtm+QtUUPQLE
199 | AT8IO4eT/h4UbhQLgyivr4FfBUbs0SXipBasAeMtDb+wcymxSpj/lfv5K0b/vw8s
200 | /Qc/48gEIMNNrtbT7qR/QRoZUcpbjcgiRU0jY+xvji1NP5eFzaYeTjgu7MvgDGXL
201 | C/4LE/01wos0XArgKTUzw2npwAqW74PDqCM5jVoYeC12wFBwipmP/4RMPT702QNc
202 | Sh4MiQIcBBIBAgAGBQJSWLCkAAoJEKIq5OtDKcVFdl0P/29v7r04ZJG3jnoLrAwu
203 | AGX45y/1d5oJOImMuAAvYbptEfaDZVw0edT9UxMPZgtW7R4/u5uyKtWuZHucCAux
204 | DWYopa9Mj6nloC+fiwvfHc/y/OYagMCKBnjnzIa5WlWwGRI+MkEwCQwN+b2R1bEJ
205 | qfO4pdJvn2V9ODgS1wc8POhKAAGe0BKB+KhXE/ZNU9+t7bzZyJt5hL5EOVoxNUfb
206 | nISWtYO4XZTUJVZIxZj2aySvX5eM+eg7aPPT6OEoHuPIwx9KYmrOBwE2B7MVvNfZ
207 | 8+Y2cxnMS5xUUUeYiE1WYvas7Bz+zW1Id6sblgh0vcc4l6TnEmWPOPMV8Ot2OYwy
208 | X6c7qc7vyh4+gs12CVBBKqfTJbqBm/wQghD83WdKL8sRbrTPwUXHOg7xOrEwxf1B
209 | EttUTqn7sLSeqiWFl/pQSIMgib4+KEXW88VJXnj7s6gs5sKyF7aDiltEZY+Egnhf
210 | cgsFDAUxluf5wi5jeHcH9SltkT9hIOBEW7lMu0J8ET+VApftCEmScmFCEn4PH5jy
211 | HULHrVCvQo/kOY6Qdhl8BC11pyCuoSv+o16fhBhF0qnSBOd7wBXuy8Hx4j/w+TO9
212 | Q9oetrHSDplJt5Zu8TDpzObSpQYlfuIyUlJq4gk+ts6O9dPuoixqplFGJRJZej6b
213 | 6PMrmp15GEQhSaUkj+1T9GwatDtNeVNRTCBQYWNrYWdlIHNpZ25pbmcga2V5ICh3
214 | d3cubXlzcWwuY29tKSA8YnVpbGRAbXlzcWwuY29tPohGBBARAgAGBQI/rOOvAAoJ
215 | EK/FI0h4g3QP9pYAoNtSISDDAAU2HafyAYlLD/yUC4hKAJ0czMsBLbo0M/xPaJ6O
216 | x9Q5Hmw2uIhGBBARAgAGBQI/tEN3AAoJEIWWr6swc05mxsMAnRag9X61Ygu1kbfB
217 | iqDku4czTd9pAJ4q5W8KZ0+2ujTrEPN55NdWtnXj4YhGBBARAgAGBQJDW7PqAAoJ
218 | EIvYLm8wuUtcf3QAnRCyqF0CpMCTdIGc7bDO5I7CIMhTAJ0UTGx0O1d/VwvdDiKW
219 | j45N2tNbYIhGBBARAgAGBQJEgG8nAAoJEAssGHlMQ+b1g3AAn0LFZP1xoiExchVU
220 | NyEf91re86gTAKDYbKP3F/FVH7Ngc8T77xkt8vuUPYhGBBARAgAGBQJFMJ7XAAoJ
221 | EDiOJeizQZWJMhYAmwXMOYCIotEUwybHTYriQ3LvzT6hAJ4kqvYk2i44BR2W2os1
222 | FPGq7FQgeYhGBBARAgAGBQJFoaNrAAoJELvbtoQbsCq+m48An2u2Sujvl5k9PEsr
223 | IOAxKGZyuC/VAKC1oB7mIN+cG2WMfmVE4ffHYhlP5ohGBBMRAgAGBQJE8TMmAAoJ
224 | EPZJxPRgk1MMCnEAoIm2pP0sIcVh9Yo0YYGAqORrTOL3AJwIbcy+e8HMNSoNV5u5
225 | 1RnrVKie34hMBBARAgAMBQJBgcsBBYMGItmLAAoJEBhZ0B9ne6HsQo0AnA/LCTQ3
226 | P5kvJvDhg1DsfVTFnJxpAJ49WFjg/kIcaN5iP1JfaBAITZI3H4hMBBARAgAMBQJB
227 | gcs0BYMGItlYAAoJEIHC9+viE7aSIiMAnRVTVVAfMXvJhV6D5uHfWeeD046TAJ4k
228 | jwP2bHyd6DjCymq+BdEDz63axohMBBARAgAMBQJBgctiBYMGItkqAAoJEGtw7Nld
229 | w/RzCaoAmwWM6+Rj1zl4D/PIys5nW48Hql3hAJ0bLOBthv96g+7oUy9Uj09Uh41l
230 | F4hMBBARAgAMBQJB0JMkBYMF1BFoAAoJEH0lygrBKafCYlUAoIb1r5D6qMLMPMO1
231 | krHk3MNbX5b5AJ4vryx5fw6iJctC5GWJ+Y8ytXab34hMBBARAgAMBQJCK1u6BYMF
232 | eUjSAAoJEOYbpIkV67mr8xMAoJMy+UJC0sqXMPSxh3BUsdcmtFS+AJ9+Z15LpoOn
233 | AidTT/K9iODXGViK6ohMBBIRAgAMBQJAKlk6BYMHektSAAoJEDyhHzSU+vhhJlwA
234 | nA/gOdwOThjO8O+dFtdbpKuImfXJAJ0TL53QKp92EzscZSz49lD2YkoEqohMBBIR
235 | AgAMBQJAPfq6BYMHZqnSAAoJEPLXXGPjnGWcst8AoLQ3MJWqttMNHDblxSyzXhFG
236 | hRU8AJ4ukRzfNJqElQHQ00ZM2WnCVNzOUIhMBBIRAgAMBQJBDgqEBYMGlpoIAAoJ
237 | EDnKK/Q9aopf/N0AniE2fcCKO1wDIwusuGVlC+JvnnWbAKDDoUSEYuNn5qzRbrzW
238 | W5zBno/Nb4hMBBIRAgAMBQJCgKU0BYMFI/9YAAoJEAQNwIV8g5+o4yQAnA9QOFLV
239 | 5POCddyUMqB/fnctuO9eAJ4sJbLKP/Z3SAiTpKrNo+XZRxauqIhMBBMRAgAMBQI+
240 | PqPRBYMJZgC7AAoJEElQ4SqycpHyJOEAn1mxHijft00bKXvucSo/pECUmppiAJ41
241 | M9MRVj5VcdH/KN/KjRtW6tHFPYhMBBMRAgAMBQI+QoIDBYMJYiKJAAoJELb1zU3G
242 | uiQ/lpEAoIhpp6BozKI8p6eaabzF5MlJH58pAKCu/ROofK8JEg2aLos+5zEYrB/L
243 | sohMBBMRAgAMBQI+TU2EBYMJV1cIAAoJEC27dr+t1MkzBQwAoJU+RuTVSn+TI+uW
244 | xUpT82/ds5NkAJ9bnNodffyMMK7GyMiv/TzifiTD+4hMBBMRAgAMBQJB14B2BYMF
245 | zSQWAAoJEGbv28jNgv0+P7wAn13uu8YkhwfNMJJhWdpK2/qM/4AQAJ40drnKW2qJ
246 | 5EEIJwtxpwapgrzWiYhMBBMRAgAMBQJCGIEOBYMFjCN+AAoJEHbBAxyiMW6hoO4A
247 | n0Ith3Kx5/sixbjZR9aEjoePGTNKAJ94SldLiESaYaJx2lGIlD9bbVoHQYhdBBMR
248 | AgAdBQI+PqMMBQkJZgGABQsHCgMEAxUDAgMWAgECF4AACgkQjHGNO1By4fVxjgCe
249 | KVTBNefwxq1A6IbRr9s/Gu8r+AIAniiKdI1lFhOduUKHAVprO3s8XerMiF0EExEC
250 | AB0FAkeslLQFCQ0wWKgFCwcKAwQDFQMCAxYCAQIXgAAKCRCMcY07UHLh9a6SAJ9/
251 | PgZQSPNeQ6LvVVzCALEBJOBt7QCffgs+vWP18JutdZc7XiawgAN9vmmIXQQTEQIA
252 | HQUCR6yUzwUJDTBYqAULBwoDBAMVAwIDFgIBAheAAAoJEIxxjTtQcuH1dCoAoLC6
253 | RtsD9K3N7NOxcp3PYOzH2oqzAKCFHn0jSqxk7E8by3sh+Ay8yVv0BYhdBBMRAgAd
254 | BQsHCgMEAxUDAgMWAgECF4AFAkequSEFCQ0ufRUACgkQjHGNO1By4fUdtwCfRNcu
255 | eXikBMy7tE2BbfwEyTLBTFAAnifQGbkmcARVS7nqauGhe1ED/vdgiF0EExECAB0F
256 | CwcKAwQDFQMCAxYCAQIXgAUCS3AuZQUJEPPyWQAKCRCMcY07UHLh9aA+AKCHDkOB
257 | KBrGb8tOg9BIub3LFhMvHQCeIOOot1hHHUlsTIXAUrD8+ubIeZaIZQQTEQIAHQUC
258 | Pj6jDAUJCWYBgAULBwoDBAMVAwIDFgIBAheAABIJEIxxjTtQcuH1B2VHUEcAAQFx
259 | jgCeKVTBNefwxq1A6IbRr9s/Gu8r+AIAniiKdI1lFhOduUKHAVprO3s8XerMiGUE
260 | ExECAB0FAkeslLQFCQ0wWKgFCwcKAwQDFQMCAxYCAQIXgAASCRCMcY07UHLh9Qdl
261 | R1BHAAEBrpIAn38+BlBI815Dou9VXMIAsQEk4G3tAJ9+Cz69Y/Xwm611lzteJrCA
262 | A32+aYhlBBMRAgAdBQsHCgMEAxUDAgMWAgECF4AFAktwL8oFCRDz86cAEgdlR1BH
263 | AAEBCRCMcY07UHLh9bDbAJ4mKWARqsvx4TJ8N1hPJF2oTjkeSgCeMVJljxmD+Jd4
264 | SscjSvTgFG6Q1WCIbwQwEQIALwUCTnc9rSgdIGJ1aWxkQG15c3FsLmNvbSB3aWxs
265 | IHN0b3Agd29ya2luZyBzb29uAAoJEIxxjTtQcuH1tT0An3EMrSjEkUv29OX05JkL
266 | iVfQr0DPAJwKtL1ycnLPv15pGMvSzav8JyWN3Ih7BDARAgA7BQJCdzX1NB0AT29w
267 | cy4uLiBzaG91bGQgaGF2ZSBiZWVuIGxvY2FsISBJJ20gKnNvKiBzdHVwaWQuLi4A
268 | CgkQOcor9D1qil/vRwCdFo08f66oKLiuEAqzlf9iDlPozEEAn2EgvCYLCCHjfGos
269 | rkrU3WK5NFVgiI8EMBECAE8FAkVvAL9IHQBTaG91bGQgaGF2ZSBiZWVuIGEgbG9j
270 | YWwgc2lnbmF0dXJlLCBvciBzb21ldGhpbmcgLSBXVEYgd2FzIEkgdGhpbmtpbmc/
271 | AAoJEDnKK/Q9aopfoPsAn3BVqKOalJeF0xPSvLR90PsRlnmGAJ44oisY7Tl3NJbP
272 | gZal8W32fbqgbIkBHAQSAQIABgUCS8IiAwAKCRDc9Osew28OLx5CB/91LHRH0qWj
273 | PPyIrv3DTQ06x2gljQ1rQ1MWZNuoeDfRcmgbrZxdiBzf5Mmd36liFiLmDIGLEX8v
274 | yT+Q9U/Nf1bRh/AKFkOx9PDSINWYbE6zCI2PNKjSWFarzr+cQvfQqGX0CEILVcU1
275 | HDxZlir1nWpRcccnasMBFp52+koc6PNFjQ13HpHbM3IcPHaaV8JD3ANyFYS4l0C/
276 | S4etDQdX37GruVb9Dcv9XkC5TS2KjDIBsEs89isHrH2+3ZlxdLsE7LxJ9DWLxbZA
277 | ND9OiiuThjAGK/pYJb+hyLLuloCg85ZX81/ZLqEOKyl55xuTvCqltSPmSUObCuWA
278 | H+OagBdYSduxiQEiBBABAgAMBQJJKmigBQMAEnUAAAoJEJcQuJvKV618U4wIAKk/
279 | 45VnuUf9w1j7fvdzgWdIjT9Lk9dLQAGB13gEVZEVYqtYF5cEZzyxl8c7NUTCTNX3
280 | qLIdul114A4CQQDg5U9bUwwUKaUfGLaz380mtKtM9V9A4fl9H2Gfsdumr8RPDQih
281 | fUUqju+d0ycdmcUScj48Nctx0xhCCWNjOFPERHi9hjRQq7x6RKyFTLjM5ftdInHC
282 | o9S+mzyqz9O+iMgX68Mm+AVgdWSC9L6yGnw6H97GD28oRMGWBTzsmCyqf9I3YutH
283 | 8mGXRot3QbSJD7/AeZVh1BQwVoJnCT8Eo1pc/OYZkRRndE1thrX0yjuFwTeOzvqe
284 | HlgzEW/FtOCBW7iR0WSJASIEEAECAAwFAkozTogFAwASdQAACgkQlxC4m8pXrXwX
285 | iAf+Ked6Mgd98YyTyNiLHhllPulboCnKgj430jLzkfgv7ytVCu1xMfKrRWRw3fA9
286 | LC19mzNQX/So/o/ywsk0nUG2sfEs5FiMk+aC957Ic/MDagmXqKapZROJbzbZ/KNj
287 | 9rPCG9kXPGa9sUn6vk39nnv4hri30tNKpM0fMxRhpcoNoCrNl4rs/QTpdRpp7KBu
288 | NaMEtDU7R7OjMDL4qT+BcCmYMIYW4dIV7tmaC0VxtcszZcVCkxSigRMPZHwxSx37
289 | GdCx9/+TqlA4vGL6NQSxZKv+Kqa+WTqBngOl6YGO6FxdiXEliNRpf1mafmz6h8Xg
290 | YXFGpehjuX1n60Iz0BffuWbpL4kBIgQQAQIADAUCSkRyCgUDABJ1AAAKCRCXELib
291 | yletfPaaB/9FCSmYwz7mvzOfHZOlEAYeLnCS290XGW89o4FYTbw0PBOulygyqj2T
292 | MCK68RCNU2KFs/bXBHeS+dDzitMAfSaULYi7LJuCCmrDM5SX5aLSj6+TxkDQDR1K
293 | 1ZE3y6qd4Kx3VeeoN7Wu+oLj/3Jjbbe0uYCQ+/PniRra9f0Z0neTExZ7CGtVBIsK
294 | S1CnKBTR26MZMOom2eTRZwGFUX1PzuW/dbZ4Z0+J6XMdTm2td7OYYWPbV3noblkU
295 | rxyjtGtO3ip3Oe3zSCWHUFMaaEuXOMw8tN51wy6ybcPVAH0hOiBwb3iCFJ/20Qqa
296 | ZEno6edYzkqf0pwvrcTmiPb+Vj0fnjBJiQEiBBABAgAMBQJKVj5HBQMAEnUAAAoJ
297 | EJcQuJvKV61845AH/R3IkGIGOB/7x3fI0gOkOS0uFljDxysiM8FV06BfXbFpRgFM
298 | ZxAhNFUdKCDN98MDkFBd5S5aGkvhAHS7PVwQ8/BIyJaJeUG3AXmrpFV/c9kYn1+Y
299 | W5OQ9E7tKu5l5UOj1Y/weNtC04u6Rh/nrp6CvMBhH2nvhSBZ+2kO2auqtFOhuK6+
300 | wUHGixt5EK8RAKs3Sf6nkP2EJUHzy1Q8ec5YDiaV24AVkPFBZMCkpD3Z+seIGrL4
301 | zUkV7PPY4zd9g34Oqj8JvtnA4AD/Z1vBLujLixcQdt9aieOySA9DAVgHbe2wVS4z
302 | i5nBURsmD5u96CUOwNK1sOV+ACtdIv/T5qSUVweJASIEEAECAAwFAkpoCoQFAwAS
303 | dQAACgkQlxC4m8pXrXysfQf+IJyIPhTphk0kGPQY3v9e3znW30VahyZxoL6q25ee
304 | QWGmVeTFlU4JThUEyzgYGip8i9qBsFPJ9XgOL5bxTGv7/WOK7eX8e+gXHB3A2QYb
305 | rM0GFZKN3BCkbA++HmvJXU58tf+aBCB0ObG+rPn6QUNSPibu4tp65TaPVPSVHjNT
306 | TICxu3sneHB+okJcc5z1ubme8nAytKb6x0JM/keNSXAev2ZN7zG5m+Pqw7/DQ/gC
307 | ogzGML1bulP2rSh8bYpJPC3vAVuHTmxsbhRBg4l7j5KiHf4qMBrVzRy+YiHhwpf2
308 | p8JbCGF141+HUD1VMeGeXnNO/9SO+dC2OGUf8WrV4FIpxIkBIgQQAQIADAUCSnku
309 | CgUDABJ1AAAKCRCXELibyletfBjrCACDd/zvoveoNlNiUUBazelcGXwaxSvUMSRO
310 | UQNkxkoMzfA+aFpYFHWEwDfLqndpoJTIkgkESd5fODJT26oLFekLvx3mpzfGz8l3
311 | 9KzDM1i6+7Mtg7DnA3kvfVIuZBNDwqoTS6hHKcGa0MJDgzZQqJ9Ke/7T7eY+Hzkt
312 | UBLjzUY2kv5VV8Ji0p6xY27jT73xiDov00ZbBFN+xBtx2iRmjjgnPtjt/zU5sLiv
313 | 9fUOA+Pb53gBT+mXMNx2tsg07Kmuz7vfjR5ydoY7guyB3X1vUK9yAmCW1Gq67eRG
314 | 934SujZFikO/oZUrwRrQu2jj5v8B7xwtcCFCdpZAIRabD4BTglvPiQEiBBABAgAM
315 | BQJKjl+9BQMAEnUAAAoJEJcQuJvKV618DTwH/3DzIl1zwr6TTtTfTBH9FSDdhvaU
316 | EPKCbLT3WZWzIHREaLEENcQ85cGoYoBeJXVBIwBczZUpGy4pqFjYcWQ9vKFm2Nt1
317 | Nrs+v9tKc+9GECH0Y1a+9GDYqnepcN2O/3HLASCEpXFwQhVe01G+lupGgqYfMgTG
318 | 9RByTkMzVXB9ER5gijGCzjTflYAOFUx2eBBLYa3w/ZZpT+nwRmEUaDpfwq06UPrz
319 | MZuhol7SGPZUNz4lz4p2NF8Td9bkhOiJ3+gORRohbq0HdaRdvSDoP/aGsQltfeF5
320 | p0KEcpIHx5B05H1twIkOGFTxyx3nTWqauEJy2a+Wl5ZBl0hB2TqwAE9Z54KJASIE
321 | EAECAAwFAkqgEkcFAwASdQAACgkQlxC4m8pXrXwyXwf/UPzz+D+n19JWivha7laU
322 | xuDzMQCKTcEjFCu4QVZ1rqcBFPoz0Tt74/X75QdmxZizqX1E6lbFEsbVjL2Mt5zZ
323 | jedS1vbSbrmn4hV4pHZr08dbflZkNX105g8ZlpsqQ7VyUt5YtWCn0tGNn4B5Eb6W
324 | MeqxQteujV3B7AtMH+CD0ja+A2/p0rHIpqScz8aupksBMCrYqhoT+7/qXNEVkjNm
325 | cu2NmHxfv6dL5Xy/0iJjie2umStu8WTfRTpYmnv2gEhbCdb/zhFvG61GgTBJqv9M
326 | vBVGRxnJFd4lNqlucsadD+UM7WjV3v5VuN2r9KD9wocd/s22ELCRA2wKccvR/nWB
327 | kIkBIgQQAQIADAUCSqgQAAUDABJ1AAAKCRCXELibyletfAT8B/9cPhH8DlHoiv+c
328 | K8rAJMomZqVqOyy4BwsRrakycVlg7/yvMs74anynSoUf0LgsXADQ29Hmrpf+zC5E
329 | 5/jPGWNK81x2VBVoB8nZkMSAnkZfOw+mWu9IAj2NLcsvt9JYNmAq5R7RrirHsDQ2
330 | DIYxRgaE/5CVEVry9YQEj18A13/SYyoB4FWpDI4fRfUWJbUJrYmfg0p+4zL0YS9F
331 | 11UhsHUu+g1W1c83N54ozI1v0l3HUwVayzII4E/YNrIkpOaO+o8Rz9g6M6jCg3mw
332 | n+OfiZVJO++VOiguJF5KzoZIICMxXE3t5hL87Kroi7UkNwm+YHw3ZaLEBm0BWAXw
333 | 4DsJZcpViQEiBBABAgAMBQJKuceJBQMAEnUAAAoJEJcQuJvKV6188KEH/24QK2LV
334 | 1l424Wx3T9G4bJFRWWuuEkTpYJw6ss72lqus9t7BsoGaNLMHQzKAlca9wLTqY826
335 | q4nv9anEqwWZ+Di8kE+UAMUq2BFTL0EvOMJ6i1ZyE8cUFVb1+09tpBWJJS7t3z00
336 | uMMMznGuHzSm4MgCnGhAsOgiuHdPWSlnHnqNJa/SB6UVQxtcDOaqQlLIvhd2HVqr
337 | OBRtER3td/YgLO6HSxXpXtz8DBa2NYQYSwAdlqJAPLBnBsLXwbCswuIDMZZv8BJw
338 | UNBEJkokOMv5CXxhPrP5kxWvyBvsIhTk8ph2GIh/ZRVNDAsChbuU1EJBACpwaMrc
339 | gwjPtI7/KTgeZVSJASIEEAECAAwFAkreCMYFAwASdQAACgkQlxC4m8pXrXyOQQf7
340 | BvRm/3PvFCCksyjBW4EVBW7z/Ps/kBK6bIE9Q7f7QlXFIcGGUIpArufXWbV+G4a3
341 | Z8LFeFJTovNePfquwpFjneUZn1CG+oVS1AfddvYhAsgkLhQqMbaNJIJ1y4D/H3xv
342 | Cna/s7Teufud0JLXoLBedFXeB5Cg2KlEoxINqMo+lm/VGJmbykwqoRvxZLDfnbFa
343 | g5zG59+OWw4TC8nzlIQYIBn22YiWRk5zsCJA40O+KL1vwBiFDrREhALQc/YBJKYr
344 | RX3ZV4U/EeYDKB0NCBk1W1tXGCee3uhM0S5VFc1j7Pg58ECuntH5xOy+KMNFljiQ
345 | wvWfbaFTJvCjFQS+OplXb4kBIgQQAQIADAUCSu86VAUDABJ1AAAKCRCXELibylet
346 | fGs8CACteI2BmKs24GF80JeWTOQIcvHnCdV7hKZOltbNPBbDv6qTt3iX2GVa10iY
347 | hI5Eg3Ojt/hKFJTMlfYZyI1peFodGjv7Lk5lu7zaNBvT1pBCP+eJspi6rGpSuhtM
348 | Sb4O5jPclRBmbY+w9wctLyZf1zG+slSdw8adcRXQNFqrvVIZYOmu2S8FunqLfxpj
349 | ewiFiDPzAzmbWzMoO2PLCYFhwV6Eh2jO33OGbvBmyHNFZBfX5F/+kiyeT47MEhrf
350 | hytJ6ZOdpxtX8HvbvzPZcDLOI80W6rPTG76KW06ZiZrJ81YCa6a7D01y7BYyW2Ho
351 | xzYcuumjRkGF4nqK4Mw+wefCp0H/iQEiBBABAgAMBQJLAF3aBQMAEnUAAAoJEJcQ
352 | uJvKV618/q0H/ibXDQG2WQmC1LoT4H+ezXjPgDg8aiuz6f4xibTmrO+L4ScMX+zK
353 | 0KZVwp6Kau28Nx+gO0oAUW8mNxhd+cl0ZaY+7RIkxEvkooKKsArBmZT+xrE6CgHl
354 | As3D4Mc+14nfD0aZaUbEiobWvXlYLl27MELLcWyeMlgbeNoucc473JddvmHSRRM5
355 | F9Qp28CvWDEXYqhq1laoaho8+ceipvzyuO3OTwjuAOqhefOHzAvFrRli99ML8xzF
356 | 1ZOvBct+36SuYxDXyIhkSd7aG9Us0lW6W5SiJYt4cDyI0JDhbhZN0tzWYKcKMZMx
357 | f8w3jW4sfQL0prhHrARqqPiU8OTUH/VNX5CJASIEEAECAAwFAksRgasFAwASdQAA
358 | CgkQlxC4m8pXrXydogf/a31ofmYFMoE3p9SqGt/v28iyO0j9A1LmqKwEhJkxff/X
359 | /Qa7pafGQ9J90JQkxYKMxydWPspTbDFMccZWkBK132vZp9Q3FHKpnDPDLK2S25mi
360 | TReeAAQNgMMFLeyy7ZHi5YsKwLbKxcSo7/m0jlitNYlmt94imFNpg/mHGsy6O+rL
361 | eQTAopuIzP3VwN6ItL5gIFxqWPmf/V0xh/vxTwLqJ66vECD8vyHrHblUzgiXHgyY
362 | bZPxAa2SRRd34V38phaZ/QsTkss+Sd/QeHChWyU9d6KengWwcr/nDO+K/hhmnO5O
363 | qz02Upwyxrgi6484HQUN/Smf44VBsSD1DBjaAKjMr4kBIgQQAQIADAUCSyNN1AUD
364 | ABJ1AAAKCRCXELibyletfCWiB/9cEZtdFVcsxpE3hJzM6PBPf+1QKuJORve/7MqN
365 | Eb3TMWFgBxyOfvD7uMpCJyOrqq5AbUQfZfj9K7qmzWUMuoYceGIlbdmHFBJwtmaF
366 | 0BiyHaobgY/9RbdCNcbtzrW34feiW9aDZyvCoLHEVkCCQACSv3FwdYVkkRB5eihv
367 | pwJk5tpScdIA12YLqzmVTFdhrZuYvtDdQHjgoLMO8B9s9kok7D2TSpveVzXXPH68
368 | Z3JkVubhHT7cs+n+9PRvcaVJtsX2VTUY5eFVqmGuAUVrvp2aN8cKQ+mVcCQrVVIh
369 | T9o8YB5925MUx2VJml0y0nkBQuMZyzMEOVGkuU/G+pVrRmmAiQEiBBABAgAMBQJL
370 | JyaSBQMAEnUAAAoJEJcQuJvKV618eU0IAKnVh6ymId9C3ZqVyxwTnOB8RMQceJzw
371 | CLqk2RT0dPhN5ZwUcQN7lCp9hymMutC8FdKRK/ESK21vJF2/576Pln4fIeOIbycB
372 | AEvqrL14epATj53uBizoNOTuwb1kximFERuW3MP4XiFUJB0tPws2vR5UU3t6GoQJ
373 | JwNoIbz9DK2L6X/Qz3Tb9if6bPSKU6JR1Yn3Hos9ogg21vWCxgMTKUuPAYhmYjSv
374 | kqH3BihXi+c17MVvE7W5GJbQHuJo+MgSxu044qnvDHZpf4Mzc30XcG1ohjxefNye
375 | iY2bzdI2yCaCtmWOlCW1Sc2oiE0zwO6lD4hY5XmC2XqlMLsKB5VNXJGJASIEEAEC
376 | AAwFAks4Ze4FAwASdQAACgkQlxC4m8pXrXyWXggAon2abiNvRzx97364Mjx4IlFv
377 | M1tVebzNbOkDwZS1ABqTDGgq/ffZA/VZrU+h2eL97cQyGxJEQ5kkm/v1iobEZEFM
378 | T0pv9WMzfidqzhdKdcpbbxdaErIjD5fBACKdjazAUeH7zce2v+bBN0l9LZoRiXbN
379 | ugG938lkJ2E4ZTYYfvftL/e4RzOgqR9VD/A5MzxfXFbCVharHbeT8OwZy4Oz2UDa
380 | DszHsNKoG1WNpOSf2HTMBPNcsOSY/hIBRWNxnzdYOkWt7laeLNmN1eUEwzk4J7Gn
381 | lambPIctOdoEUriMSaeyTkLZGejKnwi/PqARyDW1FsReKNHD753ZMViUnAsq2IkB
382 | IgQQAQIADAUCS0oyJwUDABJ1AAAKCRCXELibyletfGodCAC5hjmxwquHSb8ZL0Ri
383 | fIL3j3iU6U7qLK1TQKkTqgELfUzeF9f8NuNRtxLmzNk1T7YI9iji6NAtnuy43v61
384 | OMbqlkV8x69qNP36Owv408wXxEt0s5ViZuVOZJAY075cYRhopgfmhkh4hbkAoKCL
385 | ajOR0WUEEsDHsqqj8XLJuGRREURy8TJWaB/cotXsgiJf99gt+gIwIn8tyb3+WVIU
386 | HWfw2+Drpd3nfcMqgeO54PePJo0BWWjaar+wgC/76Se286IHcYMrml/AdnvxZaIK
387 | mxZmkTmDMCfMnVjRYSKBGjQ9Uu7dws7SMsbbd34f8Jt9nyuRqMcl4INAXthWY/S3
388 | SdiliQEiBBABAgAMBQJLW/5mBQMAEnUAAAoJEJcQuJvKV6181L8IAKq3ZOQHzqaO
389 | oz5wnvj51YG8nZoW5RG7HOb3mL1D9b+FTTzaIxsLf7STagPwKtM57rU/7ehHIuO/
390 | 9QQNQ3Mudw17ZiwD0l5X7iG8/AflWnc6bXfTz18IplRuqyVc0qQeJZhT7MBpklcS
391 | 4ZGZHPQdtAh4Aw5YXihrbbq6jV7jCzUmFz4XcT8CkJHIUGoFR0vTmFqlAt2K1imw
392 | GMh2IEamPOJ0wsTbBfZbhmkB03RToEjIipGZM+NtKS/NL2RJYWZ+FCCcEMoRgmlV
393 | mATWw3natgLWwN4Z6K4rGXONWi/0wyFgxZpmjdHmjcXaIgz8EroVsLbnaV/8yG7c
394 | gK5e6M0Fk1iJASIEEAECAAwFAkttIfgFAwASdQAACgkQlxC4m8pXrXyR3QgAksvA
395 | MfqC+ACUEWSVAlepDFR1xI45UwBa2UeBY7KjOOCiZlkGREvx20IOv1gExyPlzNxD
396 | eqmYsl2mleEoH6QlXaJRd8MxIVfAnjAt8izwU2dfDwflTTWgGQYf8q7qeAv1XC34
397 | yNge0JaTD1C55QpmcO51f2ojMsAi36bBJO4Dr59jhVYiDjQADS/d7FpAznlhH9SG
398 | Uq6ekYb2jxCSrvt0wRtMyk6YGgts4xEHcN0wC9VTobaXo9xvsqhtUK44Gdvptq1c
399 | BFX8byzD6fN8nXp+v8qhtlPYDqb4muqTh2UXXiWMtvPXo7kkZQ8CvI3YbZ10F1ID
400 | Lt20VJWFZaJYL2fzyokCIgQQAQIADAUCQYHLhQWDBiLZBwAKCRCq4+bOZqFEaKgv
401 | EACCErnaHGyUYa0wETjj6DLEXsqeOiXad4i9aBQxnD35GUgcFofC/nCY4XcnCMME
402 | nmdQ9ofUuU3OBJ6BNJIbEusAabgLooebP/3KEaiCIiyhHYU5jarpZAh+Zopgs3Oc
403 | 11mQ1tIaS69iJxrGTLodkAsAJAeEUwTPq9fHFFzC1eGBysoyFWg4bIjz/zClI+qy
404 | TbFA5g6tRoiXTo8ko7QhY2AA5UGEg+83Hdb6akC04Z2QRErxKAqrphHzj8XpjVOs
405 | QAdAi/qVKQeNKROlJ+iq6+YesmcWGfzeb87dGNweVFDJIGA0qY27pTb2lExYjsRF
406 | N4Cb13NfodAbMTOxcAWZ7jAPCxAPlHUG++mHMrhQXEToZnBFE4nbnC7vOBNgWdjU
407 | gXcpkUCkop4b17BFpR+k8ZtYLSS8p2LLz4uAeCcSm2/msJxT7rC/FvoH8428oHin
408 | cqs2ICo9zO/Ud4HmmO0O+SsZdVKIIjinGyOVWb4OOzkAlnnhEZ3o6hAHcREIsBgP
409 | wEYVTj/9ZdC0AO44Nj9cU7awaqgtrnwwfr/o4V2gl8bLSkltZU27/29HeuOeFGjl
410 | Fe0YrDd/aRNsxbyb2O28H4sG1CVZmC5uK1iQBDiSyA7Q0bbdofCWoQzm5twlpKWn
411 | Y8Oe0ub9XP5p/sVfck4FceWFHwv+/PC9RzSl33lQ6vM2wIkCIgQTAQIADAUCQp8K
412 | HAWDBQWacAAKCRDYwgoJWiRXzyE+D/9uc7z6fIsalfOYoLN60ajAbQbI/uRKBFug
413 | yZ5RoaItusn9Z2rAtn61WrFhu4uCSJtFN1ny2RERg40f56pTghKrD+YEt+Nze6+F
414 | KQ5AbGIdFsR/2bUk+ZZRSt83e14Lcb6ii/fJfzkoIox9ltkifQxqY7Tvk4noKu4o
415 | LSc8O1Wsfc/y0B9sYUUCmUfcnq58DEmGie9ovUslmyt5NPnveXxp5UeaRc5Rqt9t
416 | K2B4A+7/cqENrdZJbAMSunt2+2fkYiRunAFPKPBdJBsY1sxeL/A9aKe0viKEXQdA
417 | WqdNZKNCi8rd/oOP99/9lMbFudAbX6nL2DSb1OG2Z7NWEqgIAzjmpwYYPCKeVz5Q
418 | 8R+if9/fe5+STY/55OaI33fJ2H3v+U435VjYqbrerWe36xJItcJeqUzW71fQtXi1
419 | CTEl3w2ch7VF5oj/QyjabLnAlHgSlkSi6p7By5C2MnbCHlCfPnIinPhFoRcRGPjJ
420 | e9nFwGs+QblvS/Chzc2WX3s/2SWm4gEUKRX4zsAJ5ocyfa/vkxCkSxK/erWlCPf/
421 | J1T70+i5waXDN/E3enSet/WL7h94pQKpjz8OdGL4JSBHuAVGA+a+dknqnPF0KMKL
422 | hjrgV+L7O84FhbmAP7PXm3xmiMPriXf+el5fZZequQoIagf8rdRHHhRJxQgI0HNk
423 | nkaOqs8dtrkCDQQ+PqMdEAgA7+GJfxbMdY4wslPnjH9rF4N2qfWsEN/lxaZoJYc3
424 | a6M02WCnHl6ahT2/tBK2w1QI4YFteR47gCvtgb6O1JHffOo2HfLmRDRiRjd1DTCH
425 | qeyX7CHhcghj/dNRlW2Z0l5QFEcmV9U0Vhp3aFfWC4Ujfs3LU+hkAWzE7zaD5cH9
426 | J7yv/6xuZVw411x0h4UqsTcWMu0iM1BzELqX1DY7LwoPEb/O9Rkbf4fmLe11EzIa
427 | Ca4PqARXQZc4dhSinMt6K3X4BrRsKTfozBu74F47D8Ilbf5vSYHbuE5p/1oIDznk
428 | g/p8kW+3FxuWrycciqFTcNz215yyX39LXFnlLzKUb/F5GwADBQf+Lwqqa8CGrRfs
429 | OAJxim63CHfty5mUc5rUSnTslGYEIOCR1BeQauyPZbPDsDD9MZ1ZaSafanFvwFG6
430 | Llx9xkU7tzq+vKLoWkm4u5xf3vn55VjnSd1aQ9eQnUcXiL4cnBGoTbOWI39Ecyzg
431 | slzBdC++MPjcQTcA7p6JUVsP6oAB3FQWg54tuUo0Ec8bsM8b3Ev42LmuQT5NdKHG
432 | wHsXTPtl0klk4bQk4OajHsiy1BMahpT27jWjJlMiJc+IWJ0mghkKHt926s/ymfdf
433 | 5HkdQ1cyvsz5tryVI3Fx78XeSYfQvuuwqp2H139pXGEkg0n6KdUOetdZWhe70YGN
434 | Pw1yjWJT1IhUBBgRAgAMBQJOdz3tBQkT+wG4ABIHZUdQRwABAQkQjHGNO1By4fUU
435 | mwCbBYr2+bBEn/L2BOcnw9Z/QFWuhRMAoKVgCFm5fadQ3Afi+UQlAcOphrnJ
436 | =tUml
437 | -----END PGP PUBLIC KEY BLOCK-----
438 |
--------------------------------------------------------------------------------
/.pre-commit-config.yaml:
--------------------------------------------------------------------------------
1 | repos:
2 | - repo: https://github.com/pre-commit/pre-commit-hooks
3 | rev: v4.4.0
4 | hooks:
5 | - id: trailing-whitespace
6 | - id: end-of-file-fixer
7 | - id: check-yaml
8 | - id: check-added-large-files
9 |
10 | - repo: https://github.com/astral-sh/ruff-pre-commit
11 | rev: v0.0.286
12 | hooks:
13 | - id: ruff
14 | args: [ --fix, --exit-non-zero-on-fix ]
15 |
16 | - repo: https://github.com/psf/black
17 | rev: 23.7.0
18 | hooks:
19 | - id: black
20 |
--------------------------------------------------------------------------------
/.readthedocs.yaml:
--------------------------------------------------------------------------------
1 | # Read the Docs configuration file for Sphinx projects
2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
3 |
4 | # Required
5 | version: 2
6 |
7 | # Set the OS, Python version and other tools you might need
8 | build:
9 | os: ubuntu-22.04
10 | tools:
11 | python: "3.11"
12 | # You can also specify other tool versions:
13 | # nodejs: "20"
14 | # rust: "1.70"
15 | # golang: "1.20"
16 |
17 | # Build documentation in the "docs/" directory with Sphinx
18 | sphinx:
19 | configuration: docs/conf.py
20 | # You can configure Sphinx to use a different builder, for instance use the dirhtml builder for simpler URLs
21 | # builder: "dirhtml"
22 | # Fail on all warnings to avoid broken references
23 | # fail_on_warning: true
24 |
25 | # Optionally build your docs in additional formats such as PDF and ePub
26 | # formats:
27 | # - pdf
28 | # - epub
29 |
30 | # Optional but recommended, declare the Python requirements required
31 | # to build your documentation
32 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
33 | # python:
34 | # install:
35 | # - requirements: docs/requirements.txt
36 |
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | dist: trusty
2 | language: python
3 | services:
4 | - docker
5 | python:
6 | - "3.4"
7 | - "3.11"
8 | - "pypy"
9 | env:
10 | - DB=mysql57
11 | - DB=mysql56
12 | install:
13 | # Needs a newer version of pip to do the pip installation line
14 | - pip install pip --upgrade
15 | - pip install -e '.[test]'
16 | - pip install nose
17 | cache:
18 | apt: true
19 | pip: true
20 | directories:
21 | - $HOME/bins
22 | before_script:
23 | - env | grep DB
24 | - bash -c "if [ '$DB' = 'mysql57' ]; then sudo ./scripts/install_mysql.sh 5.7; fi"
25 | - bash -c "if [ '$DB' = 'mysql56' ]; then sudo ./scripts/install_mysql.sh 5.6; fi"
26 | script:
27 | - "sudo $(which nosetests) pymysqlreplication.tests.test_abnormal:TestAbnormalBinLogStreamReader.test_no_trailing_rotate_event"
28 | - "nosetests -e test_no_trailing_rotate_event"
29 |
30 | deploy:
31 | provider: pypi
32 | user: noplay
33 | password:
34 | secure: "uUIWiYzfgNTUuz4rKpNv/JFjnHncHPtBLiwhknCoaVJ9QGenpj9qaEWyvHZ/Aq4HyXCSjsogtQBesSxBTQCsrut6DDOWbOOFGr7WHA2N5O6EEu8rB7U4m+ziJn27+pN+MIl672tJGao7Ky9F+RO3X110Lscb0nguEMnWcHxb16U="
35 | on:
36 | tags: true
37 | repo: noplay/python-mysql-replication
38 |
--------------------------------------------------------------------------------
/CHANGELOG:
--------------------------------------------------------------------------------
1 | 0.1 01/05/2013
2 | * Initial Release with MySQL 5.5 and MySQL 5.6 support
3 |
4 | 0.2 13/10/2013
5 | * Make it work under pymysql 0.6
6 | * Ignore position of some events
7 | * Fix FormatDescriptionEvent has zero log_pos
8 | * Support checksum for mysql 5.6
9 | * Add feature to start stream from position
10 | * Change names of events to V2
11 | * Added NotImplementedEvent for a few events that we currently don't need
12 | * Support null events and a slight change of names
13 | * Support MySQL Broken Dates :(
14 | * Introduce data objects for Table / Column
15 | * Add support for TINYINT(1) to bool() Mapping
16 |
17 | 0.3 07/07/2014
18 | * use NotImplementedEvent instead of raising an Exception
19 | * Python 3 fix
20 | * Add 2006 to Mysql expected error codes
21 |
22 | 0.4 01/09/2014
23 | * Add primary column information (thanks to Lx Lu)
24 | * Python 2.6 support (thanks to Darioush Jalalinasab)
25 | * Parse gtid events (thanks to Arthur Gautier)
26 | * Code cleanup (thanks to Bernardo Sulzbach)
27 | * Travis support
28 |
29 | 0.4.1 01/09/2014
30 | * Fix missing commit for GTID in 0.4 release
31 |
32 | 0.5 28/09/2014
33 | * Remove default server id
34 | * Performances improvements
35 | * Allow filter events by schema and tables
36 |
37 | 0.6 10/05/2015
38 | * Prevent invalid table-map-entries to crash the whole app
39 | * Add support for Stop Event, update tests
40 | * Fix the order of binlog events, though we don't support them yet
41 | * Simplified RowsEvent.rows to be @property instead of __getattr__ hack
42 | * add binlog row minimal and noblob image support
43 | * remove six not being used.
44 | * misc code style improvements, mainly pep8
45 | * Update event.py to be compatible with python2.6.7
46 | * explicitly break reference cycle when closing binlogstreamreader
47 | * break reference loop using weakref to prevent memory-leaking
48 | * Freeze schema.
49 | * Freeze table schema
50 | * Avoid named parameters passed to packet because it's slower
51 | * Filter table and schema event
52 | * PyPy support
53 |
54 | 0.7 21/06/2015
55 | * Partial fix for dropped columns blowing up replication when replaying binlog with past events
56 | * Skipping GTID tests on DBs not set up to support GTID
57 | * Adding support for skipping the binlog until reaching specified timestamp.
58 | * Add support for BeginLoadQueryEvent and ExecuteLoadQueryEvent
59 |
60 | 0.8 02/01/2016
61 |
62 | * bugfix for decoding values of columns in charset "utf8mb4"
63 | * Close connection on EOF packet
64 | * Don't fail on incomplete dates (with 0 as day or month), such as 2015-00-21 or 2015-05-00
65 | * Fix GtidSet __str__ representation
66 | * Fix typo in TableMapEvent comment
67 | * gtid failed parsing: raise with value
68 | * Explicit close stream connection on exception, to prevent sockets from sitting in CLOSE_WAIT status with PyPy
69 | * Further (micro)optimization: Moving popular column types to top of if/elif, so they can shortcircuit earlier.
70 | * Making Column attributes into regular object attributes instead of a dictionary. This makes getting the attribute about 20 times faster.
71 |
72 | 0.9 20/05/2016
73 |
74 | * Allow to specify a pymysql wrapper or subclass
75 | * setup travis to run test on multiple mysql version (5.6 and 5.7)
76 | * test run with mysql5.7
77 | * report-slave: Allow reporting of port if username not supplied
78 | * gtid: fixup parsing on mysql format
79 | * added intvar event
80 | * binlogstream: improve slave report
81 | * Support for slave_uuid
82 | * Report slave in SHOW SLAVE HOSTS
83 | * fix: missing update _next_seq_no
84 | * PyMYSQL 0.7
85 | * Add more contributors
86 |
87 | 0.10 30/11/2016
88 | * row-events: TIME is now encoded as python.timedelta
89 | * Allow users to define ctl_connection_settings and the option to fail when table information is unavailable (#176)
90 | * Decode gtid from network packet
91 | * fixed count of fetched events
92 | * unittest2 is only a test requirement
93 |
94 | 0.11 07/02/2017
95 | * Implement Heartbeat (#191)
96 | * Fix handling of JSON data (#182)
97 |
98 | 0.12 04/04/2017
99 | * Clear table_map if RotateEvent has timestamp of 0 (#197)
100 | * Add support for ignored_tables and ignored_schema (#201)
101 | * failed to get table information (#199)
102 |
103 | 0.13 18/04/2017
104 | * Fixup gtid parsing (#204)
105 | This remove duplicate event from your stream if you use auto_position (the
106 | first one in the stream used to be a duplicate on reconnection, this is no
107 | longer the case).
108 | * Fix struct.pack in ReportSlave (#194)
109 |
110 | 0.14 11/09/2017
111 | * binlogstream: only_tables requires row binlog_format (#209)
112 | * Add implementation of read_int32 (#213)
113 | * fix formatting of GTIDs under py3 (#227)
114 |
115 | 0.15 11/09/2017
116 | * event: fixup gtid serialization
117 |
118 | 0.16 02/02/2018
119 | * json: Fixup string parsing (#246)
120 | * binlogstream: Raise an explanatory exception when binary logging is not enabled. (#245)
121 | * datetime: Fixup parsing (#236)
122 |
123 | 0.17 02/02/2018
124 | * remove python2.6 support
125 |
126 | 0.18 01/03/2018
127 | * bubble up protocol errors
128 |
129 | 0.19 29/04/2019
130 | * Fix Insert NULL in a boolean column returns no rows #288
131 | * Added empty string support for enum
132 | * Fix column order
133 | * Fix float problem about time fieldtype
134 |
135 | 0.20 22/11/2019
136 | * Added support for limited columnar access in MySQL
137 | * Python3 fixes
138 | * Add gtidset comparison methods
139 |
140 | 0.21 22/11/2019
141 | * fix ci configuration
142 | * Added support for limited columnar access in MySQL
143 |
144 | 0.22 20/09/2020
145 | * Support PyMysql with a version greater than 0.9.3
146 | * Fix handling of JSON literal values
147 |
148 | 0.23 25/01/2021
149 | * Add logic to handle inlined ints in large json documents
150 |
151 | 0.24 27/06/2021
152 | * Support PyMySQL 1.0
153 |
154 | 0.25 21/07/2021
155 | * Support 'ZEROFILL' attribute for numeric datatypes
156 |
157 | 0.26 30/08/2021
158 | * Correct timedelta value for negative MySQL TIME datatype
159 | * Fix parsing of row events for MySQL8 partitioned table
160 | * Set PyMySql version minimum to 0.10
161 |
162 | 0.27 18/10/2021
163 | * Parse NULL-bitmask in table map event
164 | * Add support for specifying an end log_pos
165 | * Parse status variables in query event
166 |
167 | 0.28 23/01/2022
168 | * Add support for MariaDB GTID
169 |
170 | 0.29 08/04/2022
171 | * Skip db name parsing if mts_accessed_dbs == 254
172 |
173 | 0.30 28/04/2022
174 | * Fix decoding large json arrays
175 | * Handle null json
176 |
177 | 0.31 12/03/2023
178 | * Fix parse error for query_events with MariaDB
179 |
180 | 0.40 07/05/2023
181 | * Drop support for Python 2.7
182 | * Gtid: remove __cmp__ due to python2 support dropped.
183 | * Mariadb 10.6.12: Mitigate corrupt binlog event bug
184 |
185 | 0.41 03/06/2023
186 | * Zero-pad fixed-length binary fields
187 |
188 | 0.42 25/06/2023
189 | * Add XAPrepareEvent, parse last_committed & sequence_number of GtidEvent
190 |
191 | 0.42.1 09/07/2023
192 | * Fix merging error of XAPrepareEvent
193 |
194 | 0.42.2 16/07/2023
195 | * Fix release error
196 |
197 | 0.43.0 23/07/2023
198 | * Bump PyMySQL to 1.1.0 to solve : LookupError: unknown encoding: utf8mb3
199 |
200 | 0.44.0 12/09/2023
201 | * Add MariadbAnnotateRowsEvent
202 | * Add RandEvent
203 | * Add MariadbStartEncryptionEvent
204 | * Add RowsQueryLogEvent
205 | * Add MariadbBinLogCheckPointEvent
206 | * Add PreviousGtidsEvent
207 | * Add UserVarEvent
208 | * Fix Bug increase history list length and snapshot : Mysql 8.0 version connection was being created along with the opening of a transaction. This led to a problem with looking at the snapshot before creating the table
209 | * Fix Avoid UnicodeDecodeError for non-utf8 QueryEvents
210 | * Enhance Data Integrity with Binlog Event Checksum Verification
211 | * Fix Bug table map event read null_bitmask packet
212 | * Fix Timestamp conversion to return UTC instead of local timezone
213 | * Optimize Handler_read_rnd by removing ORDER BY clause
214 |
215 | 0.45.0 02/10/2023
216 | * Fix Json object, Array parse Error
217 |
218 | 1.0.0 02/10/2023
219 | * remove column schema
220 | * Mysql 8.0.14 version support Table map Event optional metaData extraction
221 | * Sync Column from optional metaData
222 | * Fix parsing of incorrect bytes in 'extradata' for 'rows event'
223 | * Fix remove duplicate Affected columns output
224 | * Enhance Code Quality with Ruff, Black, and Pre-commit
225 | * Enhance Testing with MySQL8 & Update GitHub Actions
226 | * Add Logging when fake rotate event occurred
227 | * update logo
228 |
229 | 1.0.1 05/10/2023
230 | * add util module
231 |
232 | 1.0.2 05/10/2023
233 | * Delete charset_list.csv and add directly in CHARSET.py
234 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing to python-mysql-replication
2 |
3 | Firstly, thank you for considering contributing to `python-mysql-replication`. We appreciate your effort, and to ensure that your contributions align with the project's coding standards, we employ the use of `pre-commit` hooks. This guide will walk you through setting them up.
4 |
5 | ## Setting up pre-commit
6 |
7 | 1. **Install pre-commit**
8 |
9 | Before you can use `pre-commit`, you need to install it. You can do so using `pip`:
10 |
11 | ```bash
12 | pip install pre-commit
13 | ```
14 |
15 | 2. **Install the pre-commit hooks**
16 |
17 | Navigate to the root directory of your cloned `python-mysql-replication` repository and run:
18 |
19 | ```bash
20 | pre-commit install
21 | ```
22 |
23 | This will install the `pre-commit` hooks to your local repository.
24 |
25 | 3. **Make sure to stage your changes**
26 |
27 | `pre-commit` will only check the files that are staged in git. So make sure to `git add` any new changes you made before running `pre-commit`.
28 |
29 | 4. **Run pre-commit manually (Optional)**
30 |
31 | Before committing, you can manually run:
32 |
33 | ```bash
34 | pre-commit run --all-files
35 | ```
36 |
37 | This will run the hooks on all the files. If there's any issue, the hooks will let you know.
38 |
39 | ## If you encounter issues
40 |
41 | If you run into any problems with the hooks, you can always skip them using:
42 |
43 | ```bash
44 | git commit -m "Your commit message" --no-verify
45 | ```
46 |
47 | However, please note that skipping hooks might lead to CI failures if we use these checks in our CI pipeline. It's always recommended to adhere to the checks to ensure a smooth contribution process.
48 |
49 | ---
50 |
51 | That's it! With these steps, you should be well on your way to contributing to `python-mysql-replication`. We look forward to your contributions!
52 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | python-mysql-replication
2 | ========================
3 |
4 |
5 |
6 |
7 | Pure Python implementation of the MySQL replication protocol built on top of PyMySQL. This allows you to receive events like insert, update, and delete with their data and raw SQL queries.
8 |
9 | Use cases
10 | ===========
11 |
12 | * MySQL to NoSQL database replication
13 | * MySQL to search engine replication
14 | * Invalidate cache when something changes in the database
15 | * Audit
16 | * Real time analytics
17 |
18 | Documentation
19 | ==============
20 |
21 | A work in progress documentation is available here: https://python-mysql-replication.readthedocs.org/en/latest/
22 |
23 | Instruction about building documentation is available here:
24 | https://python-mysql-replication.readthedocs.org/en/latest/developement.html
25 |
26 |
27 | Installation
28 | =============
29 |
30 | ```
31 | pip install mysql-replication
32 | ```
33 |
34 | Mailing List
35 | ==============
36 |
37 | You can get support and discuss about new features on:
38 | https://groups.google.com/d/forum/python-mysql-replication
39 |
40 |
41 |
42 | Project status
43 | ================
44 |
45 | The project is tested with:
46 | * MySQL 5.5, 5.6 and 5.7 (v0.1 ~ v0.45)
47 | * MySQL 8.0.14 (v1.0 ~)
48 | * Python 3.3, 3.4, 3.5 and 3.6 (3.2 is not supported)
49 | * PyPy (really faster than the standard Python interpreter)
50 |
51 | For MySQL version 8.0.14 and later, set the global variables binlog_row_metadata='FULL' and binlog_row_image='FULL'
52 |
53 | The project is used in production for critical stuff in some
54 | medium-sized internet corporations. But not all use cases have
55 | been perfectly tested in the real world.
56 |
57 | Limitations
58 | =============
59 |
60 | https://python-mysql-replication.readthedocs.org/en/latest/limitations.html
61 |
62 | Featured
63 | =============
64 |
65 | [Data Pipelines Pocket Reference](https://www.oreilly.com/library/view/data-pipelines-pocket/9781492087823/) (by James Densmore, O'Reilly): Introduced and exemplified in Chapter 4: Data Ingestion: Extracting Data.
66 |
67 | [Streaming Changes in a Database with Amazon Kinesis](https://aws.amazon.com/blogs/database/streaming-changes-in-a-database-with-amazon-kinesis/) (by Emmanuel Espina, Amazon Web Services)
68 |
69 | [Near Zero Downtime Migration from MySQL to DynamoDB](https://aws.amazon.com/ko/blogs/big-data/near-zero-downtime-migration-from-mysql-to-dynamodb/) (by YongSeong Lee, Amazon Web Services)
70 |
71 | [Enable change data capture on Amazon RDS for MySQL applications that are using XA transactions](https://aws.amazon.com/ko/blogs/database/enable-change-data-capture-on-amazon-rds-for-mysql-applications-that-are-using-xa-transactions/) (by Baruch Assif, Amazon Web Services)
72 |
73 | Projects using this library
74 | ===========================
75 |
76 | * pg_chameleon: Migration and replica from MySQL to PostgreSQL https://github.com/the4thdoctor/pg_chameleon
77 | * Yelp Data Pipeline: https://engineeringblog.yelp.com/2016/11/open-sourcing-yelps-data-pipeline.html
78 | * Singer.io Tap for MySQL (https://github.com/singer-io/tap-mysql)
79 | * MySQL River Plugin for ElasticSearch: https://github.com/scharron/elasticsearch-river-mysql
80 | * Ditto: MySQL to MemSQL replicator https://github.com/memsql/ditto
81 | * ElasticMage: Full Magento integration with ElasticSearch https://github.com/ElasticMage/elasticmage
82 | * Cache buster: an automatic cache invalidation system https://github.com/rackerlabs/cache-busters
83 | * Zabbix collector for OpenTSDB https://github.com/OpenTSDB/tcollector/blob/master/collectors/0/zabbix_bridge.py
84 | * Meepo: Event sourcing and event broadcasting for databases. https://github.com/eleme/meepo
85 | * Python MySQL Replication Blinker: This package read events from MySQL binlog and send to blinker's signal. https://github.com/tarzanjw/python-mysql-replication-blinker
86 | * aiomysql_replication: Fork supporting asyncio https://github.com/jettify/aiomysql_replication
87 | * python-mysql-eventprocessor: Daemon interface for handling MySQL binary log events. https://github.com/jffifa/python-mysql-eventprocessor
88 | * mymongo: MySQL to mongo replication https://github.com/njordr/mymongo
89 | * pg_ninja: The ninja elephant obfuscation and replica tool https://github.com/transferwise/pg_ninja/ (http://tech.transferwise.com/pg_ninja-replica-with-obfuscation/)
90 | * MySQLStreamer: MySQLStreamer is a database change data capture and publish system https://github.com/Yelp/mysql_streamer
91 | * binlog2sql: a popular binlog parser that could convert raw binlog to sql and also could generate flashback sql from raw binlog (https://github.com/danfengcao/binlog2sql)
92 | * Streaming mysql binlog replication to Snowflake/Redshift/BigQuery (https://github.com/trainingrocket/mysql-binlog-replication)
93 | * MySQL to Kafka (https://github.com/scottpersinger/mysql-to-kafka/)
94 | * Aventri MySQL Monitor (https://github.com/aventri/mysql-monitor)
95 | * BitSwanPump: A real-time stream processor (https://github.com/LibertyAces/BitSwanPump)
96 | * clickhouse-mysql-data-reader: https://github.com/Altinity/clickhouse-mysql-data-reader
97 | * py-mysql-elasticsearch-sync: https://github.com/jaehyeonpy/py-mysql-elasticsearch-sync
98 | * synch: Sync data from other DB to ClickHouse (https://github.com/long2ice/synch)
99 |
100 | MySQL server settings
101 | =========================
102 |
103 | In your MySQL server configuration file you need to enable replication:
104 |
105 | [mysqld]
106 | server-id = 1
107 | log_bin = /var/log/mysql/mysql-bin.log
108 | expire_logs_days = 10
109 | max_binlog_size = 100M
110 | binlog-format = row #Very important if you want to receive write, update and delete row events
111 |
112 | Examples
113 | =========
114 |
115 | All examples are available in the [examples directory](https://github.com/noplay/python-mysql-replication/tree/master/examples)
116 |
117 |
118 | This example will dump all replication events to the console:
119 |
120 | ```python
121 | from pymysqlreplication import BinLogStreamReader
122 |
123 | mysql_settings = {'host': '127.0.0.1', 'port': 3306, 'user': 'root', 'passwd': ''}
124 |
125 | stream = BinLogStreamReader(connection_settings = mysql_settings, server_id=100)
126 |
127 | for binlogevent in stream:
128 | binlogevent.dump()
129 |
130 | stream.close()
131 | ```
132 |
133 | For this SQL sessions:
134 |
135 | ```sql
136 | CREATE DATABASE test;
137 | use test;
138 | CREATE TABLE test4 (id int NOT NULL AUTO_INCREMENT, data VARCHAR(255), data2 VARCHAR(255), PRIMARY KEY(id));
139 | INSERT INTO test4 (data,data2) VALUES ("Hello", "World");
140 | UPDATE test4 SET data = "World", data2="Hello" WHERE id = 1;
141 | DELETE FROM test4 WHERE id = 1;
142 | ```
143 |
144 | Output will be:
145 |
146 | === RotateEvent ===
147 | Date: 1970-01-01T01:00:00
148 | Event size: 24
149 | Read bytes: 0
150 |
151 | === FormatDescriptionEvent ===
152 | Date: 2012-10-07T15:03:06
153 | Event size: 84
154 | Read bytes: 0
155 |
156 | === QueryEvent ===
157 | Date: 2012-10-07T15:03:16
158 | Event size: 64
159 | Read bytes: 64
160 | Schema: test
161 | Execution time: 0
162 | Query: CREATE DATABASE test
163 |
164 | === QueryEvent ===
165 | Date: 2012-10-07T15:03:16
166 | Event size: 151
167 | Read bytes: 151
168 | Schema: test
169 | Execution time: 0
170 | Query: CREATE TABLE test4 (id int NOT NULL AUTO_INCREMENT, data VARCHAR(255), data2 VARCHAR(255), PRIMARY KEY(id))
171 |
172 | === QueryEvent ===
173 | Date: 2012-10-07T15:03:16
174 | Event size: 49
175 | Read bytes: 49
176 | Schema: test
177 | Execution time: 0
178 | Query: BEGIN
179 |
180 | === TableMapEvent ===
181 | Date: 2012-10-07T15:03:16
182 | Event size: 31
183 | Read bytes: 30
184 | Table id: 781
185 | Schema: test
186 | Table: test4
187 | Columns: 3
188 |
189 | === WriteRowsEvent ===
190 | Date: 2012-10-07T15:03:16
191 | Event size: 27
192 | Read bytes: 10
193 | Table: test.test4
194 | Affected columns: 3
195 | Changed rows: 1
196 | Values:
197 | --
198 | * data : Hello
199 | * id : 1
200 | * data2 : World
201 |
202 | === XidEvent ===
203 | Date: 2012-10-07T15:03:16
204 | Event size: 8
205 | Read bytes: 8
206 | Transaction ID: 14097
207 |
208 | === QueryEvent ===
209 | Date: 2012-10-07T15:03:17
210 | Event size: 49
211 | Read bytes: 49
212 | Schema: test
213 | Execution time: 0
214 | Query: BEGIN
215 |
216 | === TableMapEvent ===
217 | Date: 2012-10-07T15:03:17
218 | Event size: 31
219 | Read bytes: 30
220 | Table id: 781
221 | Schema: test
222 | Table: test4
223 | Columns: 3
224 |
225 | === UpdateRowsEvent ===
226 | Date: 2012-10-07T15:03:17
227 | Event size: 45
228 | Read bytes: 11
229 | Table: test.test4
230 | Affected columns: 3
231 | Changed rows: 1
232 | Affected columns: 3
233 | Values:
234 | --
235 | * data : Hello => World
236 | * id : 1 => 1
237 | * data2 : World => Hello
238 |
239 | === XidEvent ===
240 | Date: 2012-10-07T15:03:17
241 | Event size: 8
242 | Read bytes: 8
243 | Transaction ID: 14098
244 |
245 | === QueryEvent ===
246 | Date: 2012-10-07T15:03:17
247 | Event size: 49
248 | Read bytes: 49
249 | Schema: test
250 | Execution time: 1
251 | Query: BEGIN
252 |
253 | === TableMapEvent ===
254 | Date: 2012-10-07T15:03:17
255 | Event size: 31
256 | Read bytes: 30
257 | Table id: 781
258 | Schema: test
259 | Table: test4
260 | Columns: 3
261 |
262 | === DeleteRowsEvent ===
263 | Date: 2012-10-07T15:03:17
264 | Event size: 27
265 | Read bytes: 10
266 | Table: test.test4
267 | Affected columns: 3
268 | Changed rows: 1
269 | Values:
270 | --
271 | * data : World
272 | * id : 1
273 | * data2 : Hello
274 |
275 | === XidEvent ===
276 | Date: 2012-10-07T15:03:17
277 | Event size: 8
278 | Read bytes: 8
279 | Transaction ID: 14099
280 |
281 |
282 | Tests
283 | ========
284 | When it's possible we have a unit test.
285 |
286 | More information is available here:
287 | https://python-mysql-replication.readthedocs.org/en/latest/developement.html
288 |
289 | Changelog
290 | ==========
291 | https://github.com/noplay/python-mysql-replication/blob/master/CHANGELOG
292 |
293 | Similar projects
294 | ==================
295 | * Kodama: Ruby-binlog based MySQL replication listener https://github.com/y310/kodama
296 | * MySQL Hadoop Applier: C++ version http://dev.mysql.com/tech-resources/articles/mysql-hadoop-applier.html
297 | * Java: https://github.com/shyiko/mysql-binlog-connector-java
298 | * GO: https://github.com/siddontang/go-mysql
299 | * PHP: Based on this project https://github.com/krowinski/php-mysql-replication and https://github.com/fengxiangyun/mysql-replication
300 | * .NET: https://github.com/SciSharp/dotnet-mysql-replication
301 | * .NET Core: https://github.com/rusuly/MySqlCdc
302 |
303 | Special thanks
304 | ================
305 | * MySQL binlog from Jeremy Cole was a great source of knowledge about MySQL replication protocol https://github.com/jeremycole/mysql_binlog
306 | * Samuel Charron for his help https://github.com/scharron
307 |
308 | Contributors
309 | ==============
310 |
311 | Major contributor:
312 | * Julien Duponchelle Original author https://github.com/noplay
313 | * bjoernhaeuser for his bugs fixing, improvements and community support https://github.com/bjoernhaeuser
314 | * Arthur Gautier gtid, slave report... https://github.com/baloo
315 |
316 | Maintainer:
317 | * Julien Duponchelle Original author https://github.com/noplay
318 | * Sean-k1 https://github.com/sean-k1
319 | * dongwook-chan https://github.com/dongwook-chan
320 |
321 | Other contributors:
322 | * Dvir Volk for bug fix https://github.com/dvirsky
323 | * Lior Sion code cleanup and improvements https://github.com/liorsion
324 | * Lx Yu code improvements, primary keys detections https://github.com/lxyu
325 | * Young King for pymysql 0.6 support https://github.com/youngking
326 | * David Reid checksum checking fix https://github.com/dreid
327 | * Alex Gaynor fix smallint24 https://github.com/alex
328 | * lifei NotImplementedEvent https://github.com/lifei
329 | * Maralla Python 3.4 fix https://github.com/maralla
330 | * Daniel Gavrila more MySQL error codes https://github.com/danielduduta
331 | * Bernardo Sulzbach code cleanup https://github.com/mafagafogigante
332 | * Darioush Jalali Python 2.6 backport https://github.com/darioush
333 | * Jasonz bug fixes https://github.com/jasonzzz
334 | * Bartek Ogryczak cleanup and improvements https://github.com/vartec
335 | * Wang, Xiaozhe cleanup https://github.com/chaoslawful
336 | * siddontang improvements https://github.com/siddontang
337 | * Cheng Chen Python 2.6 compatibility https://github.com/cccc1999
338 | * Jffifa utf8mb4 compatibility https://github.com/jffifa
339 | * Romuald Brunet bug fixes https://github.com/romuald
340 | * Cédric Hourcade Don't fail on incomplete dates https://github.com/hc
341 | * Giacomo Lozito Explicit close stream connection on exception https://github.com/giacomolozito
342 | * Giovanni F. MySQL 5.7 support https://github.com/26fe
343 | * Igor Mastak intvar event https://github.com/mastak
344 | * Xie Zhenye fix missing update _next_seq_no https://github.com/xiezhenye
345 | * Abrar Sheikh: Multiple contributions https://github.com/abrarsheikh
346 | * Keegan Parker: secondary database for reference schema https://github.com/kdparker
347 | * Troy J. Farrell Clear table_map if RotateEvent has timestamp of 0 https://github.com/troyjfarrell
348 | * Zhanwei Wang Fix failure to get table information https://github.com/wangzw
349 | * Alexander Ignatov Fix the JSON literal
350 | * Garen Chan Support PyMysql with a version greater than 0.9.3 https://github.com/garenchan
351 | * Mike Ascah: Add logic to handle inlined ints in large json documents https://github.com/mascah
352 | * Hiroaki Kawai: PyMySQL 1.0 support (https://github.com/hkwi)
353 | * Dongwook Chan: Support for ZEROFILL, Correct timedelta value for negative MySQL TIME datatype, Fix parsing of row events for MySQL8 partitioned table, Parse status variables in query event, Fix parse errors with MariaDB (https://github.com/dongwook-chan)
354 | * Paul Vickers: Add support for specifying an end log_pos (https://github.com/paulvic)
355 | * Samira El Aabidi: Add support for MariaDB GTID (https://github.com/Samira-El)
356 | * Oliver Seemann: Handle large json, github actions,
357 | Zero-pad fixed-length binary fields (https://github.com/oseemann)
358 | * Mahadir Ahmad: Handle null json payload (https://github.com/mahadirz)
359 | * Axel Viala: Removal of Python 2.7 (https://github.com/darnuria)
360 | * Etern: Add XAPrepareEvent, parse last_committed & sequence_number of GtidEvent (https://github.com/etern)
361 |
362 | Thanks to GetResponse for their support
363 |
364 | Licence
365 | =======
366 | Copyright 2012-2023 Julien Duponchelle
367 |
368 | Licensed under the Apache License, Version 2.0 (the "License");
369 | you may not use this file except in compliance with the License.
370 | You may obtain a copy of the License at
371 |
372 | http://www.apache.org/licenses/LICENSE-2.0
373 |
374 | Unless required by applicable law or agreed to in writing, software
375 | distributed under the License is distributed on an "AS IS" BASIS,
376 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
377 | See the License for the specific language governing permissions and
378 | limitations under the License.
379 |
--------------------------------------------------------------------------------
/TODO:
--------------------------------------------------------------------------------
1 | * Support ALTER TABLE
2 | * End user documentation
3 | * Test transaction support
4 | * MySQL 5.6 support: http://dev.mysql.com/doc/internals/en/row-based-replication.html#rows-event
5 | * Support binlog_row_image=minimal or binlog_row_image=noblob
6 | * Raise exception if too much connection lost
7 | * Test log file change
8 |
--------------------------------------------------------------------------------
/docker-compose-test.yml:
--------------------------------------------------------------------------------
1 | version: '3.4'
2 |
3 | x-mysql: &mysql
4 | environment:
5 | MYSQL_ALLOW_EMPTY_PASSWORD: true
6 | command: >
7 | mysqld
8 | --log-bin=mysql-bin.log
9 | --server-id 1
10 | --binlog-format=row
11 | --gtid_mode=on
12 | --enforce-gtid-consistency=on
13 |
14 | x-mariadb: &mariadb
15 | environment:
16 | MARIADB_ALLOW_EMPTY_ROOT_PASSWORD: 1
17 | command: >
18 | --server-id=1
19 | --default-authentication-plugin=mysql_native_password
20 | --log-bin=master-bin
21 | --binlog-format=row
22 |
23 | services:
24 | percona-5.7-ctl:
25 | <<: *mysql
26 | image: percona:5.7
27 | ports:
28 | - "3307:3306"
29 | networks:
30 | - default
31 |
32 | percona-5.7:
33 | <<: *mysql
34 | image: percona:5.7
35 | ports:
36 | - "3306:3306"
37 | networks:
38 | - default
39 |
40 | percona-8.0:
41 | <<: *mysql
42 | image: percona:8.0
43 | platform: linux/amd64
44 | ports:
45 | - "3309:3306"
46 | networks:
47 | - default
48 |
49 | mariadb-10.6:
50 | <<: *mariadb
51 | image: mariadb:10.6
52 | ports:
53 | - "3308:3306"
54 | volumes:
55 | - type: bind
56 | source: ./.mariadb
57 | target: /opt/key_file
58 | - type: bind
59 | source: ./.mariadb/my.cnf
60 | target: /etc/mysql/my.cnf
61 | networks:
62 | - default
63 |
64 | pymysqlreplication:
65 | build:
66 | context: .
67 | dockerfile: test.Dockerfile
68 | args:
69 | BASE_IMAGE: python:3.11-alpine
70 | MYSQL_5_7: percona-5.7
71 | MYSQL_5_7_CTL: percona-5.7-ctl
72 | MYSQL_5_7_CTL_PORT: 3306
73 | MYSQL_8_0: percona-8.0
74 | MYSQL_8_0_PORT: 3306
75 | MARIADB_10_6: mariadb-10.6
76 | MARIADB_10_6_PORT: 3306
77 |
78 | command:
79 | - /bin/sh
80 | - -ce
81 | - |
82 | echo "wait mysql server"
83 |
84 | while :
85 | do
86 | if mysql -h percona-5.7 --user=root --execute "SELECT version();" 2>&1 >/dev/null && mysql -h percona-5.7-ctl --user=root --execute "SELECT version();" 2>&1 >/dev/null && mysql -h percona-8.0 --user=root --execute "SELECT version();" 2>&1 >/dev/null; then
87 | break
88 | fi
89 | sleep 1
90 | done
91 |
92 | echo "run pytest"
93 | pytest -k "not test_no_trailing_rotate_event and not test_end_log_pos"
94 |
95 | working_dir: /pymysqlreplication
96 | networks:
97 | - default
98 | depends_on:
99 | - percona-5.7
100 | - percona-5.7-ctl
101 | - percona-8.0
102 |
103 | networks:
104 | default:
105 | driver: bridge
106 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.4'
2 |
3 | x-mysql: &mysql
4 | environment:
5 | MYSQL_ALLOW_EMPTY_PASSWORD: true
6 | command: >
7 | mysqld
8 | --log-bin=mysql-bin.log
9 | --server-id 1
10 | --binlog-format=row
11 | --gtid_mode=on
12 | --enforce-gtid-consistency=on
13 |
14 | x-mariadb: &mariadb
15 | environment:
16 | MARIADB_ALLOW_EMPTY_ROOT_PASSWORD: 1
17 | command: >
18 | --log-bin=master-bin
19 | --server-id=1
20 | --default-authentication-plugin=mysql_native_password
21 | --binlog-format=row
22 |
23 | services:
24 | percona-5.7:
25 | <<: *mysql
26 | image: percona:5.7
27 | ports:
28 | - "3306:3306"
29 |
30 | percona-5.7-ctl:
31 | <<: *mysql
32 | image: percona:5.7
33 | ports:
34 | - "3307:3306"
35 |
36 | percona-8.0:
37 | <<: *mysql
38 | image: percona:8.0
39 | ports:
40 | - "3309:3306"
41 |
42 | mariadb-10.6:
43 | <<: *mariadb
44 | image: mariadb:10.6
45 | ports:
46 | - "3308:3306"
47 | volumes:
48 | - type: bind
49 | source: ./.mariadb
50 | target: /opt/key_file
51 | - type: bind
52 | source: ./.mariadb/my.cnf
53 | target: /etc/mysql/my.cnf
54 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = _build
9 |
10 | # Internal variables.
11 | PAPEROPT_a4 = -D latex_paper_size=a4
12 | PAPEROPT_letter = -D latex_paper_size=letter
13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
14 | # the i18n builder cannot share the environment and doctrees with the others
15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
16 |
17 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext
18 |
19 | help:
20 | 	@echo "Please use \`make <target>' where <target> is one of"
21 | @echo " html to make standalone HTML files"
22 | @echo " dirhtml to make HTML files named index.html in directories"
23 | @echo " singlehtml to make a single large HTML file"
24 | @echo " pickle to make pickle files"
25 | @echo " json to make JSON files"
26 | @echo " htmlhelp to make HTML files and a HTML help project"
27 | @echo " qthelp to make HTML files and a qthelp project"
28 | @echo " devhelp to make HTML files and a Devhelp project"
29 | @echo " epub to make an epub"
30 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
31 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
32 | @echo " text to make text files"
33 | @echo " man to make manual pages"
34 | @echo " texinfo to make Texinfo files"
35 | @echo " info to make Texinfo files and run them through makeinfo"
36 | @echo " gettext to make PO message catalogs"
37 | @echo " changes to make an overview of all changed/added/deprecated items"
38 | @echo " linkcheck to check all external links for integrity"
39 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
40 |
41 | clean:
42 | -rm -rf $(BUILDDIR)/*
43 |
44 | html:
45 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
46 | @echo
47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
48 |
49 | dirhtml:
50 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
51 | @echo
52 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
53 |
54 | singlehtml:
55 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
56 | @echo
57 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
58 |
59 | pickle:
60 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
61 | @echo
62 | @echo "Build finished; now you can process the pickle files."
63 |
64 | json:
65 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
66 | @echo
67 | @echo "Build finished; now you can process the JSON files."
68 |
69 | htmlhelp:
70 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
71 | @echo
72 | @echo "Build finished; now you can run HTML Help Workshop with the" \
73 | ".hhp project file in $(BUILDDIR)/htmlhelp."
74 |
75 | qthelp:
76 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp
77 | @echo
78 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \
79 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:"
80 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PythonMySQLReplication.qhcp"
81 | @echo "To view the help file:"
82 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PythonMySQLReplication.qhc"
83 |
84 | devhelp:
85 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp
86 | @echo
87 | @echo "Build finished."
88 | @echo "To view the help file:"
89 | @echo "# mkdir -p $$HOME/.local/share/devhelp/PythonMySQLReplication"
90 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PythonMySQLReplication"
91 | @echo "# devhelp"
92 |
93 | epub:
94 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub
95 | @echo
96 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub."
97 |
98 | latex:
99 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
100 | @echo
101 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex."
102 | @echo "Run \`make' in that directory to run these through (pdf)latex" \
103 | "(use \`make latexpdf' here to do that automatically)."
104 |
105 | latexpdf:
106 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
107 | @echo "Running LaTeX files through pdflatex..."
108 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
109 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
110 |
111 | text:
112 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text
113 | @echo
114 | @echo "Build finished. The text files are in $(BUILDDIR)/text."
115 |
116 | man:
117 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man
118 | @echo
119 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man."
120 |
121 | texinfo:
122 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
123 | @echo
124 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo."
125 | @echo "Run \`make' in that directory to run these through makeinfo" \
126 | "(use \`make info' here to do that automatically)."
127 |
128 | info:
129 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
130 | @echo "Running Texinfo files through makeinfo..."
131 | make -C $(BUILDDIR)/texinfo info
132 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
133 |
134 | gettext:
135 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
136 | @echo
137 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale."
138 |
139 | changes:
140 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
141 | @echo
142 | @echo "The overview file is in $(BUILDDIR)/changes."
143 |
144 | linkcheck:
145 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
146 | @echo
147 | @echo "Link check complete; look for any errors in the above output " \
148 | "or in $(BUILDDIR)/linkcheck/output.txt."
149 |
150 | doctest:
151 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
152 | @echo "Testing of doctests in the sources finished, look at the " \
153 | "results in $(BUILDDIR)/doctest/output.txt."
154 |
--------------------------------------------------------------------------------
/docs/binlogstream.rst:
--------------------------------------------------------------------------------
1 | ##################
2 | BinLogStreamReader
3 | ##################
4 |
5 |
6 | .. automodule:: pymysqlreplication.binlogstream
7 | :members:
8 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # Python MySQL Replication documentation build configuration file, created by
2 | # sphinx-quickstart on Sun Sep 30 15:04:27 2012.
3 | #
4 | # This file is execfile()d with the current directory set to its containing dir.
5 | #
6 | # Note that not all possible configuration values are present in this
7 | # autogenerated file.
8 | #
9 | # All configuration values have a default; values that are commented out
10 | # serve to show the default.
11 |
12 | import sys
13 | import os
14 |
15 | # If extensions (or modules to document with autodoc) are in another directory,
16 | # add these directories to sys.path here. If the directory is relative to the
17 | # documentation root, use os.path.abspath to make it absolute, like shown here.
18 | sys.path.insert(0, os.path.abspath(".."))
19 |
20 | # -- General configuration -----------------------------------------------------
21 |
22 | # If your documentation needs a minimal Sphinx version, state it here.
23 | # needs_sphinx = '1.0'
24 |
25 | # Add any Sphinx extension module names here, as strings. They can be extensions
26 | # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
27 | extensions = ["sphinx.ext.autodoc", "sphinx.ext.viewcode"]
28 |
29 | # Add any paths that contain templates here, relative to this directory.
30 | templates_path = ["_templates"]
31 |
32 | # The suffix of source filenames.
33 | source_suffix = ".rst"
34 |
35 | # The encoding of source files.
36 | # source_encoding = 'utf-8-sig'
37 |
38 | # The master toctree document.
39 | master_doc = "index"
40 |
41 | # General information about the project.
42 | project = "Python MySQL Replication"
43 | copyright = "2012-2023, Julien Duponchelle"
44 |
45 | # The version info for the project you're documenting, acts as replacement for
46 | # |version| and |release|, also used in various other places throughout the
47 | # built documents.
48 | #
49 | # The short X.Y version.
50 | version = "1.0.2"
51 | # The full version, including alpha/beta/rc tags.
52 | release = "1.0.2"
53 |
54 | # The language for content autogenerated by Sphinx. Refer to documentation
55 | # for a list of supported languages.
56 | # language = None
57 |
58 | # There are two options for replacing |today|: either, you set today to some
59 | # non-false value, then it is used:
60 | # today = ''
61 | # Else, today_fmt is used as the format for a strftime call.
62 | # today_fmt = '%B %d, %Y'
63 |
64 | # List of patterns, relative to source directory, that match files and
65 | # directories to ignore when looking for source files.
66 | exclude_patterns = ["_build"]
67 |
68 | # The reST default role (used for this markup: `text`) to use for all documents.
69 | # default_role = None
70 |
71 | # If true, '()' will be appended to :func: etc. cross-reference text.
72 | # add_function_parentheses = True
73 |
74 | # If true, the current module name will be prepended to all description
75 | # unit titles (such as .. function::).
76 | # add_module_names = True
77 |
78 | # If true, sectionauthor and moduleauthor directives will be shown in the
79 | # output. They are ignored by default.
80 | # show_authors = False
81 |
82 | # The name of the Pygments (syntax highlighting) style to use.
83 | pygments_style = "sphinx"
84 |
85 | # A list of ignored prefixes for module index sorting.
86 | # modindex_common_prefix = []
87 |
88 |
89 | # -- Options for HTML output ---------------------------------------------------
90 |
91 | # The theme to use for HTML and HTML Help pages. See the documentation for
92 | # a list of builtin themes.
93 | html_theme = "default"
94 |
95 | # Theme options are theme-specific and customize the look and feel of a theme
96 | # further. For a list of options available for each theme, see the
97 | # documentation.
98 | # html_theme_options = {}
99 |
100 | # Add any paths that contain custom themes here, relative to this directory.
101 | # html_theme_path = []
102 |
103 | # The name for this set of Sphinx documents. If None, it defaults to
104 | # "<project> v<release> documentation".
105 | # html_title = None
106 |
107 | # A shorter title for the navigation bar. Default is the same as html_title.
108 | # html_short_title = None
109 |
110 | # The name of an image file (relative to this directory) to place at the top
111 | # of the sidebar.
112 | # html_logo = None
113 |
114 | # The name of an image file (within the static path) to use as favicon of the
115 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
116 | # pixels large.
117 | # html_favicon = None
118 |
119 | # Add any paths that contain custom static files (such as style sheets) here,
120 | # relative to this directory. They are copied after the builtin static files,
121 | # so a file named "default.css" will overwrite the builtin "default.css".
122 | html_static_path = ["_static"]
123 |
124 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
125 | # using the given strftime format.
126 | # html_last_updated_fmt = '%b %d, %Y'
127 |
128 | # If true, SmartyPants will be used to convert quotes and dashes to
129 | # typographically correct entities.
130 | # html_use_smartypants = True
131 |
132 | # Custom sidebar templates, maps document names to template names.
133 | # html_sidebars = {}
134 |
135 | # Additional templates that should be rendered to pages, maps page names to
136 | # template names.
137 | # html_additional_pages = {}
138 |
139 | # If false, no module index is generated.
140 | # html_domain_indices = True
141 |
142 | # If false, no index is generated.
143 | # html_use_index = True
144 |
145 | # If true, the index is split into individual pages for each letter.
146 | # html_split_index = False
147 |
148 | # If true, links to the reST sources are added to the pages.
149 | # html_show_sourcelink = True
150 |
151 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
152 | # html_show_sphinx = True
153 |
154 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
155 | # html_show_copyright = True
156 |
157 | # If true, an OpenSearch description file will be output, and all pages will
158 | # contain a <link> tag referring to it. The value of this option must be the
159 | # base URL from which the finished HTML is served.
160 | # html_use_opensearch = ''
161 |
162 | # This is the file name suffix for HTML files (e.g. ".xhtml").
163 | # html_file_suffix = None
164 |
165 | # Output file base name for HTML help builder.
166 | htmlhelp_basename = "PythonMySQLReplicationdoc"
167 |
168 |
169 | # -- Options for LaTeX output --------------------------------------------------
170 |
171 | latex_elements = {
172 | # The paper size ('letterpaper' or 'a4paper').
173 | #'papersize': 'letterpaper',
174 | # The font size ('10pt', '11pt' or '12pt').
175 | #'pointsize': '10pt',
176 | # Additional stuff for the LaTeX preamble.
177 | #'preamble': '',
178 | }
179 |
180 | # Grouping the document tree into LaTeX files. List of tuples
181 | # (source start file, target name, title, author, documentclass [howto/manual]).
182 | latex_documents = [
183 | (
184 | "index",
185 | "PythonMySQLReplication.tex",
186 | "Python MySQL Replication Documentation",
187 | "Julien Duponchelle",
188 | "manual",
189 | ),
190 | ]
191 |
192 | # The name of an image file (relative to this directory) to place at the top of
193 | # the title page.
194 | # latex_logo = None
195 |
196 | # For "manual" documents, if this is true, then toplevel headings are parts,
197 | # not chapters.
198 | # latex_use_parts = False
199 |
200 | # If true, show page references after internal links.
201 | # latex_show_pagerefs = False
202 |
203 | # If true, show URL addresses after external links.
204 | # latex_show_urls = False
205 |
206 | # Documents to append as an appendix to all manuals.
207 | # latex_appendices = []
208 |
209 | # If false, no module index is generated.
210 | # latex_domain_indices = True
211 |
212 |
213 | # -- Options for manual page output --------------------------------------------
214 |
215 | # One entry per manual page. List of tuples
216 | # (source start file, name, description, authors, manual section).
217 | man_pages = [
218 | (
219 | "index",
220 | "pythonmysqlreplication",
221 | "Python MySQL Replication Documentation",
222 | ["Julien Duponchelle"],
223 | 1,
224 | )
225 | ]
226 |
227 | # If true, show URL addresses after external links.
228 | # man_show_urls = False
229 |
230 |
231 | # -- Options for Texinfo output ------------------------------------------------
232 |
233 | # Grouping the document tree into Texinfo files. List of tuples
234 | # (source start file, target name, title, author,
235 | # dir menu entry, description, category)
236 | texinfo_documents = [
237 | (
238 | "index",
239 | "PythonMySQLReplication",
240 | "Python MySQL Replication Documentation",
241 | "Julien Duponchelle",
242 | "PythonMySQLReplication",
243 | "One line description of project.",
244 | "Miscellaneous",
245 | ),
246 | ]
247 |
248 | # Documents to append as an appendix to all manuals.
249 | # texinfo_appendices = []
250 |
251 | # If false, no module index is generated.
252 | # texinfo_domain_indices = True
253 |
254 | # How to display URL addresses: 'footnote', 'no', or 'inline'.
255 | # texinfo_show_urls = 'footnote'
256 |
--------------------------------------------------------------------------------
/docs/developement.rst:
--------------------------------------------------------------------------------
1 | #############
2 | Developement
3 | #############
4 |
5 | Contributions
6 | =============
7 |
8 | You can report issues and contribute to the project on: https://github.com/noplay/python-mysql-replication
9 |
10 | The standard way to contribute code to the project is to fork the Github
11 | project and open a pull request with your changes:
12 | https://github.com/noplay/python-mysql-replication
13 |
14 | Don't hesitate to open an issue with what you want to change if
15 | you want to discuss it before coding.
16 |
17 |
18 | Tests
19 | ======
20 |
21 | When it's possible we have a unit test.
22 |
23 | *pymysqlreplication/tests/* contains the test suite. The test suite
24 | use the standard *unittest* Python module.
25 |
26 | **Be careful** tests will reset the binary log of your MySQL server.
27 |
28 | Make sure you have the following configuration set in your mysql config file (usually my.cnf on development env):
29 |
30 | ::
31 |
32 | log-bin=mysql-bin
33 | server-id=1
34 | binlog-format = row #Very important if you want to receive write, update and delete row events
35 | gtid_mode=ON
36 | log-slave_updates=true
37 | enforce_gtid_consistency
38 |
39 |
40 | Run tests with
41 |
42 | ::
43 |
44 | py.test -k "not test_no_trailing_rotate_event"
45 |
46 | This will skip the ``test_no_trailing_rotate_event`` which requires that the
47 | user running the test have permission to alter the binary log files.
48 |
49 | Running mysql in docker (main):
50 |
51 | ::
52 |
53 | docker run --name python-mysql-replication-tests -e MYSQL_ALLOW_EMPTY_PASSWORD=true -p 3306:3306 --rm percona:latest --log-bin=mysql-bin.log --server-id 1 --binlog-format=row --gtid_mode=on --enforce-gtid-consistency=on --log_slave_updates
54 |
55 | Running mysql in docker (for ctl server):
56 |
57 | ::
58 |
59 |     docker run --name python-mysql-replication-tests-ctl --expose=3307 -e MYSQL_ALLOW_EMPTY_PASSWORD=true -p 3307:3307 --rm percona:latest --log-bin=mysql-bin.log --server-id 1 --binlog-format=row --gtid_mode=on --enforce-gtid-consistency=on --log_slave_updates -P 3307
60 |
61 |
62 | Each pull request is tested on Travis CI:
63 | https://travis-ci.org/noplay/python-mysql-replication
64 |
65 | Build the documentation
66 | ========================
67 |
68 | The documentation is available in docs folder. You can
69 | build it using Sphinx:
70 |
71 | ::
72 |
73 | cd docs
74 | pip install sphinx
75 | make html
76 |
--------------------------------------------------------------------------------
/docs/events.rst:
--------------------------------------------------------------------------------
1 | ######
2 | Events
3 | ######
4 |
5 | .. automodule:: pymysqlreplication.event
6 | :members:
7 |
8 | ==========
9 | Row events
10 | ==========
11 |
12 | These events are sent by MySQL when data is modified.
13 |
14 | .. automodule:: pymysqlreplication.row_event
15 | :members:
16 |
--------------------------------------------------------------------------------
/docs/examples.rst:
--------------------------------------------------------------------------------
1 | ########
2 | Examples
3 | ########
4 |
5 | You can find a list of working examples here: https://github.com/noplay/python-mysql-replication/tree/master/examples
6 |
7 |
8 | Prerequisites
9 | =============
10 |
11 | The user you plan to use for the BinaryLogClient must have `REPLICATION SLAVE` privilege. To get binlog filename and position, the user must be granted at least one of `REPLICATION CLIENT` or `SUPER` as well. To get table info of mysql server, the user also needs `SELECT` privilege on `information_schema.COLUMNS`.
12 | We suggest grant below privileges to the user:
13 |
14 | :command:`GRANT REPLICATION SLAVE, REPLICATION CLIENT, SELECT ON *.* TO 'user'@'host'`
15 |
--------------------------------------------------------------------------------
/docs/index.rst:
--------------------------------------------------------------------------------
1 | .. Python MySQL Replication documentation master file, created by
2 | sphinx-quickstart on Sun Sep 30 15:04:27 2012.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to Python MySQL Replication's documentation!
7 | ====================================================
8 |
9 | Pure Python implementation of the MySQL replication protocol built on top of PyMySQL. This allows you to receive events like insert, update and delete with their data and raw SQL queries.
10 |
11 | Use cases
12 | ===========
13 |
14 | * MySQL to NoSQL database replication
15 | * MySQL to search engine replication
16 | * Invalidate cache when something changes in the database
17 | * Audit
18 | * Real time analytics
19 |
20 |
21 | Contents
22 | =========
23 |
24 | .. toctree::
25 | :maxdepth: 2
26 |
27 | installation
28 | limitations
29 | binlogstream
30 | events
31 | examples
32 | support
33 | developement
34 | licence
35 |
36 | Indices and tables
37 | ==================
38 |
39 | * :ref:`genindex`
40 | * :ref:`modindex`
41 | * :ref:`search`
42 |
--------------------------------------------------------------------------------
/docs/installation.rst:
--------------------------------------------------------------------------------
1 | #############
2 | Installation
3 | #############
4 |
5 | Python MySQL Replication is available on PyPi.
6 | You can install it with:
7 |
8 | :command:`pip install mysql-replication`
9 |
--------------------------------------------------------------------------------
/docs/licence.rst:
--------------------------------------------------------------------------------
1 | #######
2 | Licence
3 | #######
4 |
5 | Copyright 2012-2014 Julien Duponchelle
6 |
7 | Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
8 |
9 | http://www.apache.org/licenses/LICENSE-2.0
10 |
11 | Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
12 |
--------------------------------------------------------------------------------
/docs/limitations.rst:
--------------------------------------------------------------------------------
1 | ###########
2 | Limitations
3 | ###########
4 |
5 | GEOMETRY
6 | =========
7 | GEOMETRY field is not decoded you will get the raw data.
8 |
9 | binlog_row_image
10 | ================
11 | Only [binlog_row_image=full](http://dev.mysql.com/doc/refman/5.6/en/replication-options-binary-log.html#sysvar_binlog_row_image) is supported (it's the default value).
12 |
13 | BOOLEAN and BOOL
14 | ================
BOOLEAN is returned as TINYINT(1), because that is how MySQL actually stores it.
16 |
17 | http://dev.mysql.com/doc/refman/5.6/en/numeric-type-overview.html
18 |
19 | Our discussion about it:
20 | https://github.com/noplay/python-mysql-replication/pull/16
21 |
--------------------------------------------------------------------------------
/docs/support.rst:
--------------------------------------------------------------------------------
1 | #######
2 | Support
3 | #######
4 |
You can get support and discuss new features on:
6 | https://groups.google.com/d/forum/python-mysql-replication
7 |
8 | You can browse and report issues on:
9 | https://github.com/noplay/python-mysql-replication/issues
10 |
--------------------------------------------------------------------------------
/examples/dump_events.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | #
4 | # Dump all replication events from a remote mysql server
5 | #
6 |
7 | from pymysqlreplication import BinLogStreamReader
8 |
9 | MYSQL_SETTINGS = {"host": "127.0.0.1", "port": 3306, "user": "root", "passwd": ""}
10 |
11 |
def main():
    # server_id must be unique among the replicas attached to this master.
    # blocking=True keeps the reader waiting for new events at the end of
    # the stream instead of returning.
    reader = BinLogStreamReader(
        connection_settings=MYSQL_SETTINGS, server_id=3, blocking=True
    )

    # Print every replication event as it arrives.
    for event in reader:
        event.dump()

    reader.close()


if __name__ == "__main__":
    main()
28 |
--------------------------------------------------------------------------------
/examples/logstash/logstash-simple.conf:
--------------------------------------------------------------------------------
1 | input {
2 | stdin {
3 | type => "mysql_event"
4 | format => "json_event"
5 | debug => true
6 | }
7 | }
8 | output {
9 | stdout { debug => true debug_format => "json"}
10 | elasticsearch { embedded => true }
11 | }
12 |
--------------------------------------------------------------------------------
/examples/logstash/mysql_to_logstash.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | #
4 | # Output logstash events to the console from MySQL replication stream
5 | #
6 | # You can pipe it to logstash like this:
7 | # python examples/logstash/mysql_to_logstash.py | java -jar logstash-1.1.13-flatjar.jar agent -f examples/logstash/logstash-simple.conf
8 |
9 | import json
10 | import sys
11 |
12 | from pymysqlreplication import BinLogStreamReader
13 | from pymysqlreplication.row_event import (
14 | DeleteRowsEvent,
15 | UpdateRowsEvent,
16 | WriteRowsEvent,
17 | )
18 |
19 | MYSQL_SETTINGS = {"host": "127.0.0.1", "port": 3306, "user": "root", "passwd": ""}
20 |
21 |
def main():
    """Stream row events from MySQL and print them as JSON for logstash.

    Reads insert/update/delete row events from the replication stream and
    emits one JSON document per row on stdout.
    """
    stream = BinLogStreamReader(
        connection_settings=MYSQL_SETTINGS,
        server_id=3,
        only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
    )

    for binlogevent in stream:
        for row in binlogevent.rows:
            event = {"schema": binlogevent.schema, "table": binlogevent.table}

            # BUGFIX: dict_items objects cannot be concatenated with `+` in
            # Python 3 (the old `dict(event.items() + row[...].items())`
            # raised TypeError); merge the mappings with update() instead.
            if isinstance(binlogevent, DeleteRowsEvent):
                event["action"] = "delete"
                event.update(row["values"])
            elif isinstance(binlogevent, UpdateRowsEvent):
                event["action"] = "update"
                event.update(row["after_values"])
            elif isinstance(binlogevent, WriteRowsEvent):
                event["action"] = "insert"
                event.update(row["values"])
            # default=str so non-JSON-native column values (datetime,
            # Decimal, bytes) are serialized instead of raising TypeError.
            print(json.dumps(event, default=str))
            sys.stdout.flush()

    stream.close()


if __name__ == "__main__":
    main()
50 |
--------------------------------------------------------------------------------
/examples/mariadb_gtid/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3'

services:
  testdb:
    container_name: "testdb"
    image: mariadb:10.6
    environment:
      # Quoted so the value stays a string; an unquoted 1 is parsed as a
      # YAML integer before the container runtime sees it.
      MARIADB_ALLOW_EMPTY_ROOT_PASSWORD: "1"
      MARIADB_DATABASE: mydb
      MARIADB_USER: replication_user
      MARIADB_PASSWORD: secret123passwd
    ports:
      - "3306:3306"
    # Enable row-based binary logging restricted to mydb, with a fixed
    # server id so a replica can attach.
    command: |
      --server-id=1
      --default-authentication-plugin=mysql_native_password
      --log-bin=master-bin
      --binlog-format=row
      --log-slave-updates=on
      --binlog-do-db=mydb
    volumes:
      # Seed script executed on first container start.
      - ./queries.sql:/docker-entrypoint-initdb.d/queries.sql
    networks:
      - mariadb-cluster

networks:
  mariadb-cluster:
    ipam:
      driver: default
      config:
        - subnet: 172.200.0.0/24
32 |
--------------------------------------------------------------------------------
/examples/mariadb_gtid/queries.sql:
--------------------------------------------------------------------------------
1 | # configure replication user
2 | grant replication slave on *.* to 'replication_user'@'%';
3 | flush privileges;
4 |
5 | # create objects
6 | create table r1 (
7 | i1 int auto_increment primary key,
8 | c1 varchar(10),
9 | d1 datetime default current_timestamp()
10 | );
11 |
12 | insert into r1 (c1) values ('#1'),('#2'),('#3'),('#4'),('#5'),('#6'),('#7');
13 |
14 | create table r2 (i2 int primary key, d2 datetime) ;
15 | insert into r2 (i2, d2) values (1, now());
16 | insert into r2 (i2, d2) values (2, now());
17 | insert into r2 (i2, d2) values (3, now());
18 | insert into r2 (i2, d2) values (4, now());
19 |
20 | update r1 set c1=concat(c1, '-up');
21 |
22 | select * from r2;
23 |
24 | delete from r1 where i1 < 4;
25 |
26 | drop table r2;
27 |
28 | alter table r1 add column b1 bool default False;
29 | insert into r1 (c1, b1) values ('#8', True);
30 |
--------------------------------------------------------------------------------
/examples/mariadb_gtid/read_event.py:
--------------------------------------------------------------------------------
1 | import pymysql
2 |
3 | from pymysqlreplication import BinLogStreamReader
4 | from pymysqlreplication.event import (
5 | RotateEvent,
6 | MariadbGtidEvent,
7 | MariadbAnnotateRowsEvent,
8 | MariadbBinLogCheckPointEvent,
9 | )
10 | from pymysqlreplication.row_event import (
11 | WriteRowsEvent,
12 | UpdateRowsEvent,
13 | DeleteRowsEvent,
14 | )
15 |
16 | MARIADB_SETTINGS = {
17 | "host": "127.0.0.1",
18 | "port": 3306,
19 | "user": "replication_user",
20 | "passwd": "secret123passwd",
21 | }
22 |
23 |
class MariaDbGTID:
    """Small helper around a MariaDB connection to read GTID state."""

    def __init__(self, conn_config):
        self.connection = pymysql.connect(**conn_config)

    def query_single_value(self, sql: str):
        """Run ``sql`` and return the first column of the first row as str.

        Returns None when the query yields no rows (the previous version
        crashed with a TypeError on ``row[0]`` in that case).
        """
        with self.connection.cursor() as cursor:
            cursor.execute(sql)
            row = cursor.fetchone()
        if row is None:
            return None
        return str(row[0])

    def extract_gtid(self, gtid, server_id):
        """Return ``gtid`` when it belongs to ``server_id``, else None.

        A MariaDB GTID has the form ``domain-server_id-sequence``.
        ``server_id`` may be an int or a str; it is compared textually.
        """
        if gtid is None or server_id is None:
            return None

        gtid_parts = gtid.split("-")
        if len(gtid_parts) != 3:
            return None

        # BUGFIX: coerce to str so int server ids (as returned by
        # query_server_id) match too; a plain == between str and int was
        # always False.
        if gtid_parts[1] == str(server_id):
            return gtid

        return None

    def query_gtid_current_pos(self, server_id):
        """Return the server's current GTID position if owned by server_id."""
        return self.extract_gtid(
            self.query_single_value("SELECT @@gtid_current_pos"), server_id
        )

    def query_server_id(self):
        """Return the server's @@server_id as an int."""
        return int(self.query_single_value("SELECT @@server_id"))
59 |
60 |
if __name__ == "__main__":
    db = MariaDbGTID(MARIADB_SETTINGS)

    server_id = db.query_server_id()
    print("Server ID: ", server_id)

    # gtid = db.query_gtid_current_pos(server_id)
    gtid = "0-1-1"  # initial pos

    # Event classes we want delivered from the replication stream.
    events_of_interest = [
        MariadbGtidEvent,
        MariadbBinLogCheckPointEvent,
        RotateEvent,
        WriteRowsEvent,
        UpdateRowsEvent,
        DeleteRowsEvent,
        MariadbAnnotateRowsEvent,
    ]

    stream = BinLogStreamReader(
        connection_settings=MARIADB_SETTINGS,
        server_id=server_id,
        blocking=False,
        only_events=events_of_interest,
        auto_position=gtid,
        is_mariadb=True,
        annotate_rows_event=True,
    )

    print("Starting reading events from GTID ", gtid)
    for event in stream:
        event.dump()

        # Remember the most recent GTID so it could be used to resume later.
        if isinstance(event, MariadbGtidEvent):
            gtid = event.gtid

    print("Last encountered GTID: ", gtid)

    stream.close()
98 |
--------------------------------------------------------------------------------
/examples/redis_cache.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | #
# Update a redis server cache when an event is triggered
# in the MySQL replication log
6 | #
7 |
8 | import redis
9 |
10 | from pymysqlreplication import BinLogStreamReader
11 | from pymysqlreplication.row_event import (
12 | DeleteRowsEvent,
13 | UpdateRowsEvent,
14 | WriteRowsEvent,
15 | )
16 |
17 | MYSQL_SETTINGS = {"host": "127.0.0.1", "port": 3306, "user": "root", "passwd": ""}
18 |
19 |
def main():
    """Mirror MySQL row changes into Redis hashes keyed by ``schema:table:id``.

    Inserts/updates write the row values into a Redis hash; deletes remove
    the corresponding key. Assumes every replicated table has an ``id``
    column (NOTE(review): confirm against the schemas being replicated).
    """
    r = redis.Redis()

    stream = BinLogStreamReader(
        connection_settings=MYSQL_SETTINGS,
        server_id=3,  # server_id is your slave identifier, it should be unique
        only_events=[DeleteRowsEvent, WriteRowsEvent, UpdateRowsEvent],
    )

    for binlogevent in stream:
        prefix = "%s:%s:" % (binlogevent.schema, binlogevent.table)

        for row in binlogevent.rows:
            if isinstance(binlogevent, DeleteRowsEvent):
                vals = row["values"]
                r.delete(prefix + str(vals["id"]))
            elif isinstance(binlogevent, UpdateRowsEvent):
                vals = row["after_values"]
                # hmset() was deprecated in redis-py 3.5 and removed in 4.0;
                # hset() with a mapping is the supported replacement.
                r.hset(prefix + str(vals["id"]), mapping=vals)
            elif isinstance(binlogevent, WriteRowsEvent):
                vals = row["values"]
                r.hset(prefix + str(vals["id"]), mapping=vals)

    stream.close()


if __name__ == "__main__":
    main()
48 |
--------------------------------------------------------------------------------
/getting-started.md:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/logo.svg:
--------------------------------------------------------------------------------
1 |
2 |
10 |
--------------------------------------------------------------------------------
/pymysqlreplication/__init__.py:
--------------------------------------------------------------------------------
1 | """
2 | Python MySQL Replication:
Pure Python implementation of the MySQL replication protocol, built on
top of PyMySQL.
5 |
6 | Licence
7 | =======
8 | Copyright 2012 Julien Duponchelle
9 |
10 | Licensed under the Apache License, Version 2.0 (the "License");
11 | you may not use this file except in compliance with the License.
12 | You may obtain a copy of the License at
13 |
14 | http://www.apache.org/licenses/LICENSE-2.0
15 |
16 | Unless required by applicable law or agreed to in writing, software
17 | distributed under the License is distributed on an "AS IS" BASIS,
18 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19 | See the License for the specific language governing permissions and
20 | limitations under the License.
21 | """
22 |
23 | from .binlogstream import BinLogStreamReader
24 |
--------------------------------------------------------------------------------
/pymysqlreplication/binlogstream.py:
--------------------------------------------------------------------------------
1 | import struct
2 | import logging
3 | from distutils.version import LooseVersion
4 |
5 | import pymysql
6 | from pymysql.constants.COMMAND import COM_BINLOG_DUMP, COM_REGISTER_SLAVE
7 | from pymysql.cursors import DictCursor
8 |
9 | from .constants.BINLOG import TABLE_MAP_EVENT, ROTATE_EVENT, FORMAT_DESCRIPTION_EVENT
10 | from .event import (
11 | QueryEvent,
12 | RotateEvent,
13 | FormatDescriptionEvent,
14 | XidEvent,
15 | GtidEvent,
16 | StopEvent,
17 | XAPrepareEvent,
18 | BeginLoadQueryEvent,
19 | ExecuteLoadQueryEvent,
20 | HeartbeatLogEvent,
21 | NotImplementedEvent,
22 | MariadbGtidEvent,
23 | MariadbAnnotateRowsEvent,
24 | RandEvent,
25 | MariadbStartEncryptionEvent,
26 | RowsQueryLogEvent,
27 | MariadbGtidListEvent,
28 | MariadbBinLogCheckPointEvent,
29 | UserVarEvent,
30 | PreviousGtidsEvent,
31 | )
32 | from .exceptions import BinLogNotEnabled
33 | from .gtid import GtidSet
34 | from .packet import BinLogPacketWrapper
35 | from .row_event import UpdateRowsEvent, WriteRowsEvent, DeleteRowsEvent, TableMapEvent
36 |
37 | try:
38 | from pymysql.constants.COMMAND import COM_BINLOG_DUMP_GTID
39 | except ImportError:
40 | # Handle old pymysql versions
41 | # See: https://github.com/PyMySQL/PyMySQL/pull/261
42 | COM_BINLOG_DUMP_GTID = 0x1E
43 |
44 | # 2013 Connection Lost
45 | # 2006 MySQL server has gone away
46 | MYSQL_EXPECTED_ERROR_CODES = [2013, 2006]
47 |
48 |
49 | class ReportSlave(object):
50 | """Represent the values that you may report when connecting as a slave
51 | to a master. SHOW SLAVE HOSTS related"""
52 |
53 | hostname = ""
54 | username = ""
55 | password = ""
56 | port = 0
57 |
58 | def __init__(self, value):
59 | """
60 | Attributes:
61 | value: string or tuple
62 | if string, then it will be used hostname
63 | if tuple it will be used as (hostname, user, password, port)
64 | """
65 |
66 | if isinstance(value, (tuple, list)):
67 | try:
68 | self.hostname = value[0]
69 | self.username = value[1]
70 | self.password = value[2]
71 | self.port = int(value[3])
72 | except IndexError:
73 | pass
74 | elif isinstance(value, dict):
75 | for key in ["hostname", "username", "password", "port"]:
76 | try:
77 | setattr(self, key, value[key])
78 | except KeyError:
79 | pass
80 | else:
81 | self.hostname = value
82 |
83 | def __repr__(self):
84 | return "" % (
85 | self.hostname,
86 | self.username,
87 | self.password,
88 | self.port,
89 | )
90 |
91 | def encoded(self, server_id, master_id=0):
92 | """
93 | server_id: the slave server-id
94 | master_id: usually 0. Appears as "master id" in SHOW SLAVE HOSTS
95 | on the master. Unknown what else it impacts.
96 | """
97 |
98 | # 1 [15] COM_REGISTER_SLAVE
99 | # 4 server-id
100 | # 1 slaves hostname length
101 | # string[$len] slaves hostname
102 | # 1 slaves user len
103 | # string[$len] slaves user
104 | # 1 slaves password len
105 | # string[$len] slaves password
106 | # 2 slaves mysql-port
107 | # 4 replication rank
108 | # 4 master-id
109 |
110 | lhostname = len(self.hostname.encode())
111 | lusername = len(self.username.encode())
112 | lpassword = len(self.password.encode())
113 |
114 | packet_len = (
115 | 1
116 | + 4 # command
117 | + 1 # server-id
118 | + lhostname # hostname length
119 | + 1
120 | + lusername # username length
121 | + 1
122 | + lpassword # password length
123 | + 2
124 | + 4 # slave mysql port
125 | + 4 # replication rank
126 | ) # master-id
127 |
128 | MAX_STRING_LEN = 257 # one byte for length + 256 chars
129 |
130 | return (
131 | struct.pack(" 5.6"""
308 | cur = self._stream_connection.cursor()
309 | cur.execute("SHOW GLOBAL VARIABLES LIKE 'BINLOG_CHECKSUM'")
310 | result = cur.fetchone()
311 | cur.close()
312 |
313 | if result is None:
314 | return False
315 | var, value = result[:2]
316 | if value == "NONE":
317 | return False
318 | return True
319 |
320 | def _register_slave(self):
321 | if not self.report_slave:
322 | return
323 |
324 | packet = self.report_slave.encoded(self.__server_id)
325 |
326 | if pymysql.__version__ < LooseVersion("0.6"):
327 | self._stream_connection.wfile.write(packet)
328 | self._stream_connection.wfile.flush()
329 | self._stream_connection.read_packet()
330 | else:
331 | self._stream_connection._write_bytes(packet)
332 | self._stream_connection._next_seq_id = 1
333 | self._stream_connection._read_packet()
334 |
335 | def __connect_to_stream(self):
336 | # log_pos (4) -- position in the binlog-file to start the stream with
337 | # flags (2) BINLOG_DUMP_NON_BLOCK (0 or 1)
338 | # server_id (4) -- server id of this slave
339 | # log_file (string.EOF) -- filename of the binlog on the master
340 | self._stream_connection = self.pymysql_wrapper(**self.__connection_settings)
341 |
342 | self.__use_checksum = self.__checksum_enabled()
343 |
344 | # If checksum is enabled we need to inform the server about the that
345 | # we support it
346 | if self.__use_checksum:
347 | cur = self._stream_connection.cursor()
348 | cur.execute("SET @master_binlog_checksum= @@global.binlog_checksum")
349 | cur.close()
350 |
351 | if self.slave_uuid:
352 | cur = self._stream_connection.cursor()
353 | cur.execute(
354 | f"SET @slave_uuid = {self.slave_uuid}, @replica_uuid = {self.slave_uuid}"
355 | )
356 | cur.close()
357 |
358 | if self.slave_heartbeat:
359 | # 4294967 is documented as the max value for heartbeats
360 | net_timeout = float(self.__connection_settings.get("read_timeout", 4294967))
361 | # If heartbeat is too low, the connection will disconnect before,
362 | # this is also the behavior in mysql
363 | heartbeat = float(min(net_timeout / 2.0, self.slave_heartbeat))
364 | if heartbeat > 4294967:
365 | heartbeat = 4294967
366 |
367 | # master_heartbeat_period is nanoseconds
368 | heartbeat = int(heartbeat * 1000000000)
369 | cur = self._stream_connection.cursor()
370 | cur.execute("SET @master_heartbeat_period= %d" % heartbeat)
371 | cur.close()
372 |
373 | # When replicating from Mariadb 10.6.12 using binlog coordinates, a slave capability < 4 triggers a bug in
374 | # Mariadb, when it tries to replace GTID events with dummy ones. Given that this library understands GTID
375 | # events, setting the capability to 4 circumvents this error.
376 | # If the DB is mysql, this won't have any effect so no need to run this in a condition
377 | cur = self._stream_connection.cursor()
378 | cur.execute("SET @mariadb_slave_capability=4")
379 | cur.close()
380 |
381 | self._register_slave()
382 |
383 | if not self.auto_position:
384 | if self.is_mariadb:
385 | prelude = self.__set_mariadb_settings()
386 | else:
387 | # only when log_file and log_pos both provided, the position info is
388 | # valid, if not, get the current position from master
389 | if self.log_file is None or self.log_pos is None:
390 | cur = self._stream_connection.cursor()
391 | cur.execute("SHOW MASTER STATUS")
392 | master_status = cur.fetchone()
393 | if master_status is None:
394 | raise BinLogNotEnabled()
395 | self.log_file, self.log_pos = master_status[:2]
396 | cur.close()
397 |
398 | prelude = struct.pack("= self.end_log_pos:
637 | # We're currently at, or past, the specified end log position.
638 | self.is_past_end_log_pos = True
639 |
640 | # This check must not occur before clearing the ``table_map`` as a
641 | # result of a RotateEvent.
642 | #
643 | # The first RotateEvent in a binlog file has a timestamp of
644 | # zero. If the server has moved to a new log and not written a
645 | # timestamped RotateEvent at the end of the previous log, the
646 | # RotateEvent at the beginning of the new log will be ignored
647 | # if the caller provided a positive ``skip_to_timestamp``
648 | # value. This will result in the ``table_map`` becoming
649 | # corrupt.
650 | #
651 | # https://dev.mysql.com/doc/internals/en/event-data-for-specific-event-types.html
652 | # From the MySQL Internals Manual:
653 | #
654 | # ROTATE_EVENT is generated locally and written to the binary
655 | # log on the master. It is written to the relay log on the
656 | # slave when FLUSH LOGS occurs, and when receiving a
657 | # ROTATE_EVENT from the master. In the latter case, there
658 | # will be two rotate events in total originating on different
659 | # servers.
660 | #
661 | # There are conditions under which the terminating
662 | # log-rotation event does not occur. For example, the server
663 | # might crash.
664 | if (
665 | self.skip_to_timestamp
666 | and binlog_event.timestamp < self.skip_to_timestamp
667 | ):
668 | continue
669 |
670 | if (
671 | binlog_event.event_type == TABLE_MAP_EVENT
672 | and binlog_event.event is not None
673 | ):
674 | self.table_map[
675 | binlog_event.event.table_id
676 | ] = binlog_event.event.get_table()
677 |
678 | # event is none if we have filter it on packet level
679 | # we filter also not allowed events
680 | if binlog_event.event is None or (
681 | binlog_event.event.__class__ not in self.__allowed_events
682 | ):
683 | continue
684 |
685 | if binlog_event.event_type == FORMAT_DESCRIPTION_EVENT:
686 | self.mysql_version = binlog_event.event.mysql_version
687 |
688 | return binlog_event.event
689 |
690 | def _allowed_event_list(
691 | self, only_events, ignored_events, filter_non_implemented_events
692 | ):
693 | if only_events is not None:
694 | events = set(only_events)
695 | else:
696 | events = set(
697 | (
698 | QueryEvent,
699 | RotateEvent,
700 | StopEvent,
701 | FormatDescriptionEvent,
702 | XAPrepareEvent,
703 | XidEvent,
704 | GtidEvent,
705 | BeginLoadQueryEvent,
706 | ExecuteLoadQueryEvent,
707 | UpdateRowsEvent,
708 | WriteRowsEvent,
709 | DeleteRowsEvent,
710 | TableMapEvent,
711 | HeartbeatLogEvent,
712 | NotImplementedEvent,
713 | MariadbGtidEvent,
714 | RowsQueryLogEvent,
715 | MariadbAnnotateRowsEvent,
716 | RandEvent,
717 | MariadbStartEncryptionEvent,
718 | MariadbGtidListEvent,
719 | MariadbBinLogCheckPointEvent,
720 | UserVarEvent,
721 | PreviousGtidsEvent,
722 | )
723 | )
724 | if ignored_events is not None:
725 | for e in ignored_events:
726 | events.remove(e)
727 | if filter_non_implemented_events:
728 | try:
729 | events.remove(NotImplementedEvent)
730 | except KeyError:
731 | pass
732 | return frozenset(events)
733 |
734 | def __get_dbms(self):
735 | if not self.__connected_ctl:
736 | self.__connect_to_ctl()
737 |
738 | cur = self._ctl_connection.cursor()
739 | cur.execute("SELECT VERSION();")
740 |
741 | version_info = cur.fetchone().get("VERSION()", "")
742 |
743 | if "MariaDB" in version_info:
744 | return "mariadb"
745 | return "mysql"
746 |
747 | def __log_valid_parameters(self):
748 | ignored = ["allowed_events", "table_map"]
749 | for parameter, value in self.__dict__.items():
750 | if parameter.startswith("_BinLogStreamReader__"):
751 | parameter = parameter.replace("_BinLogStreamReader__", "")
752 | if parameter in ignored or not value:
753 | continue
754 | if type(value) == frozenset:
755 | string_list = [
756 | str(item).split()[-1][:-2].split(".")[2] for item in value
757 | ]
758 | items = ", ".join(string_list)
759 | comment = f"{parameter}: [{items}]"
760 | else:
761 | comment = f"{parameter}: {value}"
762 | logging.info(comment)
763 |
    def __iter__(self):
        # Repeatedly call fetchone() until it returns None (end of stream).
        return iter(self.fetchone, None)
766 |
--------------------------------------------------------------------------------
/pymysqlreplication/bitmap.py:
--------------------------------------------------------------------------------
# Lookup table: bitCountInByte[b] is the number of set bits (popcount)
# in the byte value b, for every b in 0..255.
bitCountInByte = [bin(i).count("1") for i in range(256)]


def BitCount(bitmap):
    """Return the total number of set bits across all bytes of ``bitmap``.

    ``bitmap`` may be a bytes object (items are ints) or a str whose
    characters are converted with ord() before the table lookup.
    """
    total = 0
    for item in bitmap:
        if isinstance(item, str):
            item = ord(item)
        total += bitCountInByte[item]
    return total
270 |
271 |
def BitGet(bitmap, position):
    """Return the bit at ``position`` in ``bitmap``.

    The result is 0 when the bit is clear, otherwise the bit's own
    power-of-two value (callers treat it as a boolean).
    """
    byte = bitmap[position >> 3]
    if isinstance(byte, str):
        byte = ord(byte)
    return byte & (1 << (position % 8))
278 |
--------------------------------------------------------------------------------
/pymysqlreplication/column.py:
--------------------------------------------------------------------------------
1 | import struct
2 |
3 | from .constants import FIELD_TYPE
4 |
5 |
6 | class Column(object):
7 | """Definition of a column"""
8 |
9 | def __init__(self, *args, **kwargs):
10 | if len(args) == 2:
11 | self.__parse_column_definition(*args)
12 | else:
13 | self.__dict__.update(kwargs)
14 |
15 | def __parse_column_definition(self, column_type, packet):
16 | self.type = column_type
17 | self.name = None
18 | self.unsigned = False
19 | self.is_primary = False
20 | self.charset_id = None
21 | self.character_set_name = None
22 | self.collation_name = None
23 | self.enum_values = None
24 | self.set_values = None
25 | self.visibility = False
26 |
27 | if self.type == FIELD_TYPE.VARCHAR:
28 | self.max_length = struct.unpack("> 8
59 | if real_type == FIELD_TYPE.SET or real_type == FIELD_TYPE.ENUM:
60 | self.type = real_type
61 | self.size = metadata & 0x00FF
62 | else:
63 | self.max_length = (((metadata >> 4) & 0x300) ^ 0x300) + (metadata & 0x00FF)
64 |
65 | def __eq__(self, other):
66 | return self.data == other.data
67 |
68 | def __ne__(self, other):
69 | return not self.__eq__(other)
70 |
    def serializable_data(self):
        """Return the column's public attributes as a plain dict (see ``data``)."""
        return self.data
73 |
74 | @property
75 | def data(self):
76 | return dict((k, v) for (k, v) in self.__dict__.items() if not k.startswith("_"))
77 |
--------------------------------------------------------------------------------
/pymysqlreplication/constants/BINLOG.py:
--------------------------------------------------------------------------------
1 | UNKNOWN_EVENT = 0x00
2 | START_EVENT_V3 = 0x01
3 | QUERY_EVENT = 0x02
4 | STOP_EVENT = 0x03
5 | ROTATE_EVENT = 0x04
6 | INTVAR_EVENT = 0x05
7 | LOAD_EVENT = 0x06
8 | SLAVE_EVENT = 0x07
9 | CREATE_FILE_EVENT = 0x08
10 | APPEND_BLOCK_EVENT = 0x09
11 | EXEC_LOAD_EVENT = 0x0A
12 | DELETE_FILE_EVENT = 0x0B
13 | NEW_LOAD_EVENT = 0x0C
14 | RAND_EVENT = 0x0D
15 | USER_VAR_EVENT = 0x0E
16 | FORMAT_DESCRIPTION_EVENT = 0x0F
17 | XID_EVENT = 0x10
18 | BEGIN_LOAD_QUERY_EVENT = 0x11
19 | EXECUTE_LOAD_QUERY_EVENT = 0x12
20 | TABLE_MAP_EVENT = 0x13
21 | PRE_GA_WRITE_ROWS_EVENT = 0x14
22 | PRE_GA_UPDATE_ROWS_EVENT = 0x15
23 | PRE_GA_DELETE_ROWS_EVENT = 0x16
24 | WRITE_ROWS_EVENT_V1 = 0x17
25 | UPDATE_ROWS_EVENT_V1 = 0x18
26 | DELETE_ROWS_EVENT_V1 = 0x19
27 | INCIDENT_EVENT = 0x1A
28 | HEARTBEAT_LOG_EVENT = 0x1B
29 | IGNORABLE_LOG_EVENT = 0x1C
30 | ROWS_QUERY_LOG_EVENT = 0x1D
31 | WRITE_ROWS_EVENT_V2 = 0x1E
32 | UPDATE_ROWS_EVENT_V2 = 0x1F
33 | DELETE_ROWS_EVENT_V2 = 0x20
34 | GTID_LOG_EVENT = 0x21
35 | ANONYMOUS_GTID_LOG_EVENT = 0x22
36 | PREVIOUS_GTIDS_LOG_EVENT = 0x23
37 | XA_PREPARE_EVENT = 0x26
38 |
39 | # INTVAR types
40 | INTVAR_INVALID_INT_EVENT = 0x00
41 | INTVAR_LAST_INSERT_ID_EVENT = 0x01
42 | INTVAR_INSERT_ID_EVENT = 0x02
43 |
44 | # MariaDB events
45 |
46 | MARIADB_ANNOTATE_ROWS_EVENT = 0xA0
47 | MARIADB_BINLOG_CHECKPOINT_EVENT = 0xA1
48 | MARIADB_GTID_EVENT = 0xA2
49 | MARIADB_GTID_GTID_LIST_EVENT = 0xA3
50 | MARIADB_START_ENCRYPTION_EVENT = 0xA4
51 |
52 |
53 | # Common-Footer
54 | BINLOG_CHECKSUM_LEN = 4
55 |
--------------------------------------------------------------------------------
/pymysqlreplication/constants/FIELD_TYPE.py:
--------------------------------------------------------------------------------
1 | # Original code from PyMySQL
2 | # Copyright (c) 2010 PyMySQL contributors
3 | #
4 | # Permission is hereby granted, free of charge, to any person obtaining a copy
5 | # of this software and associated documentation files (the "Software"), to deal
6 | # in the Software without restriction, including without limitation the rights
7 | # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 | # copies of the Software, and to permit persons to whom the Software is
9 | # furnished to do so, subject to the following conditions:
10 | #
11 | # The above copyright notice and this permission notice shall be included in
12 | # all copies or substantial portions of the Software.
13 | #
14 | # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 | # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 | # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 | # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 | # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 | # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20 | # THE SOFTWARE.
21 |
22 | DECIMAL = 0
23 | TINY = 1
24 | SHORT = 2
25 | LONG = 3
26 | FLOAT = 4
27 | DOUBLE = 5
28 | NULL = 6
29 | TIMESTAMP = 7
30 | LONGLONG = 8
31 | INT24 = 9
32 | DATE = 10
33 | TIME = 11
34 | DATETIME = 12
35 | YEAR = 13
36 | NEWDATE = 14
37 | VARCHAR = 15
38 | BIT = 16
39 | TIMESTAMP2 = 17
40 | DATETIME2 = 18
41 | TIME2 = 19
42 | JSON = 245 # Introduced in 5.7.8
43 | NEWDECIMAL = 246
44 | ENUM = 247
45 | SET = 248
46 | TINY_BLOB = 249
47 | MEDIUM_BLOB = 250
48 | LONG_BLOB = 251
49 | BLOB = 252
50 | VAR_STRING = 253
51 | STRING = 254
52 | GEOMETRY = 255
53 |
54 | CHAR = TINY
55 | INTERVAL = ENUM
56 |
--------------------------------------------------------------------------------
/pymysqlreplication/constants/NONE_SOURCE.py:
--------------------------------------------------------------------------------
1 | NULL = "null"
2 | OUT_OF_DATE_RANGE = "out of date range"
3 | OUT_OF_DATETIME_RANGE = "out of datetime range"
4 | OUT_OF_DATETIME2_RANGE = "out of datetime2 range"
5 | EMPTY_SET = "empty set"
6 | COLS_BITMAP = "cols bitmap"
7 |
--------------------------------------------------------------------------------
/pymysqlreplication/constants/STATUS_VAR_KEY.py:
--------------------------------------------------------------------------------
1 | # from enum import IntEnum
2 |
3 | # class StatusVarsKey(IntEnum):
4 | """List of Query_event_status_vars
5 |
6 | A status variable in query events is a sequence of status KEY-VALUE pairs.
7 | The class variables enumerated below are KEYs.
8 | Each KEY determines the length of corresponding VALUE.
9 |
10 | For further details refer to:
11 | mysql-server: https://github.com/mysql/mysql-server/blob/beb865a960b9a8a16cf999c323e46c5b0c67f21f/libbinlogevents/include/statement_events.h#L463-L532
12 | MySQL Documentation: https://dev.mysql.com/doc/internals/en/query-event.html
13 |
14 | Status variable key names From mysql-server source code, edited by dongwook-chan
15 | """
16 |
17 | # KEY
18 | Q_FLAGS2_CODE = 0x00
19 | Q_SQL_MODE_CODE = 0x01
20 | Q_CATALOG_CODE = 0x02
21 | Q_AUTO_INCREMENT = 0x03
22 | Q_CHARSET_CODE = 0x04
23 | Q_TIME_ZONE_CODE = 0x05
24 | Q_CATALOG_NZ_CODE = 0x06
25 | Q_LC_TIME_NAMES_CODE = 0x07
26 | Q_CHARSET_DATABASE_CODE = 0x08
27 | Q_TABLE_MAP_FOR_UPDATE_CODE = 0x09
28 | Q_MASTER_DATA_WRITTEN_CODE = 0x0A
29 | Q_INVOKER = 0x0B
30 | Q_UPDATED_DB_NAMES = 0x0C # MySQL only
31 | Q_MICROSECONDS = 0x0D # MySQL only
32 | Q_COMMIT_TS = 0x0E
33 | Q_COMMIT_TS2 = 0x0F
34 | Q_EXPLICIT_DEFAULTS_FOR_TIMESTAMP = 0x10
35 | Q_DDL_LOGGED_WITH_XID = 0x11
36 | Q_DEFAULT_COLLATION_FOR_UTF8MB4 = 0x12
37 | Q_SQL_REQUIRE_PRIMARY_KEY = 0x13
38 | Q_DEFAULT_TABLE_ENCRYPTION = 0x14
39 | Q_HRNOW = 0x80 # MariaDB only
40 | Q_XID = 0x81 # MariaDB only
41 |
--------------------------------------------------------------------------------
/pymysqlreplication/constants/__init__.py:
--------------------------------------------------------------------------------
1 | from .BINLOG import *
2 | from .FIELD_TYPE import *
3 | from .STATUS_VAR_KEY import *
4 |
--------------------------------------------------------------------------------
/pymysqlreplication/exceptions.py:
--------------------------------------------------------------------------------
1 | class TableMetadataUnavailableError(Exception):
2 | def __init__(self, table):
3 | Exception.__init__(self, f"Unable to find metadata for table {table}")
4 |
5 |
6 | class BinLogNotEnabled(Exception):
7 | def __init__(self):
8 | Exception.__init__(self, "MySQL binary logging is not enabled.")
9 |
10 |
11 | class StatusVariableMismatch(Exception):
12 | def __init__(self):
13 | Exception.__init__(
14 | self,
15 | " ".join(
16 | (
17 | "Unknown status variable in query event.",
18 | "Possible parse failure in preceding fields",
19 | "or outdated constants.STATUS_VAR_KEY",
20 | "Refer to MySQL documentation/source code",
21 | "or create an issue on GitHub",
22 | )
23 | ),
24 | )
25 |
--------------------------------------------------------------------------------
/pymysqlreplication/gtid.py:
--------------------------------------------------------------------------------
1 | import re
2 | import struct
3 | import binascii
4 | from copy import deepcopy
5 | from io import BytesIO
6 |
7 |
8 | def overlap(i1, i2):
9 | return i1[0] < i2[1] and i1[1] > i2[0]
10 |
11 |
12 | def contains(i1, i2):
13 | return i2[0] >= i1[0] and i2[1] <= i1[1]
14 |
15 |
16 | class Gtid(object):
17 | """A mysql GTID is composed of a server-id and a set of right-open
18 | intervals [a,b), and represent all transactions x that happened on
19 | server SID such as
20 |
21 | a <= x < b
22 |
23 | The human representation of it, though, is either represented by a
24 | single transaction number A=a (when only one transaction is covered,
25 | ie b = a+1)
26 |
27 | SID:A
28 |
29 | Or a closed interval [A,B] for at least two transactions (note, in that
30 | case, that b=B+1)
31 |
32 | SID:A-B
33 |
34 | We can also have a mix of ranges for a given SID:
35 | SID:1-2:4:6-74
36 |
37 | For convenience, a Gtid accepts adding Gtid's to it and will merge
38 | the existing interval representation. Adding TXN 3 to the human
39 | representation above would produce:
40 |
41 | SID:1-4:6-74
42 |
43 | and adding 5 to this new result:
44 |
45 | SID:1-74
46 |
47 | Raises:
48 | ValueError: If construction parsing from string fails
49 | Exception: Adding an already present transaction number (one that overlaps).
50 | Exception: Adding a Gtid with a different SID.
51 | """
52 |
53 | @staticmethod
54 | def parse_interval(interval):
55 | """
56 | We parse a human-generated string here. So our end value b
57 | is incremented to conform to the internal representation format.
58 |
59 | Raises:
60 | - ValueError if GTID format is incorrect
61 | """
62 | m = re.search("^([0-9]+)(?:-([0-9]+))?$", interval)
63 | if not m:
64 | raise ValueError(f"GTID format is incorrect: {interval!r}")
65 | a = int(m.group(1))
66 | b = int(m.group(2) or a)
67 | return a, b + 1
68 |
69 | @staticmethod
70 | def parse(gtid):
71 | """Parse a GTID from mysql textual format.
72 |
73 | Raises:
74 | - ValueError: if GTID format is incorrect.
75 | """
76 | m = re.search(
77 | "^([0-9a-fA-F]{8}(?:-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12})"
78 | "((?::[0-9-]+)+)$",
79 | gtid,
80 | )
81 | if not m:
82 | raise ValueError(f"GTID format is incorrect: {gtid!r}")
83 |
84 | sid = m.group(1)
85 | intervals = m.group(2)
86 |
87 | intervals_parsed = [Gtid.parse_interval(x) for x in intervals.split(":")[1:]]
88 |
89 | return sid, intervals_parsed
90 |
91 | def __add_interval(self, itvl):
92 | """
93 | Use the internal representation format and add it
94 | to our intervals, merging if required.
95 |
96 | Raises:
97 | Exception: if Malformated interval or Overlapping interval
98 | """
99 | new = []
100 |
101 | if itvl[0] > itvl[1]:
102 | raise Exception(f"Malformed interval {itvl}")
103 |
104 | if any(overlap(x, itvl) for x in self.intervals):
105 | raise Exception(f"Overlapping interval {itvl}")
106 |
107 | ## Merge: arrange interval to fit existing set
108 | for existing in sorted(self.intervals):
109 | if itvl[0] == existing[1]:
110 | itvl = (existing[0], itvl[1])
111 | continue
112 |
113 | if itvl[1] == existing[0]:
114 | itvl = (itvl[0], existing[1])
115 | continue
116 |
117 | new.append(existing)
118 |
119 | self.intervals = sorted(new + [itvl])
120 |
121 | def __sub_interval(self, itvl):
122 | """Using the internal representation, remove an interval
123 |
124 | Raises: Exception if itvl malformated"""
125 | new = []
126 |
127 | if itvl[0] > itvl[1]:
128 | raise Exception(f"Malformed interval {itvl}")
129 |
130 | if not any(overlap(x, itvl) for x in self.intervals):
131 | # No raise
132 | return
133 |
134 | ## Merge: arrange existing set around interval
135 | for existing in sorted(self.intervals):
136 | if overlap(existing, itvl):
137 | if existing[0] < itvl[0]:
138 | new.append((existing[0], itvl[0]))
139 | if existing[1] > itvl[1]:
140 | new.append((itvl[1], existing[1]))
141 | else:
142 | new.append(existing)
143 |
144 | self.intervals = new
145 |
146 | def __contains__(self, other):
147 | """Test if other is contained within self.
148 | First we compare sid they must be equals.
149 |
150 | Then we search if intervals from other are contained within
151 | self intervals.
152 | """
153 | if other.sid != self.sid:
154 | return False
155 |
156 | return all(
157 | any(contains(me, them) for me in self.intervals) for them in other.intervals
158 | )
159 |
160 | def __init__(self, gtid, sid=None, intervals=[]):
161 | if sid:
162 | intervals = intervals
163 | else:
164 | sid, intervals = Gtid.parse(gtid)
165 |
166 | self.sid = sid
167 | self.intervals = []
168 | for itvl in intervals:
169 | self.__add_interval(itvl)
170 |
171 | def __add__(self, other):
172 | """Include the transactions of this gtid.
173 |
174 | Raises:
175 | Exception: if the attempted merge has different SID"""
176 | if self.sid != other.sid:
177 | raise Exception(f"Attempt to merge different SID {self.sid} != {other.sid}")
178 |
179 | result = deepcopy(self)
180 |
181 | for itvl in other.intervals:
182 | result.__add_interval(itvl)
183 |
184 | return result
185 |
186 | def __sub__(self, other):
187 | """Remove intervals. Do not raise, if different SID simply
188 | ignore"""
189 | result = deepcopy(self)
190 | if self.sid != other.sid:
191 | return result
192 |
193 | for itvl in other.intervals:
194 | result.__sub_interval(itvl)
195 |
196 | return result
197 |
198 | def __str__(self):
199 | """We represent the human value here - a single number
200 | for one transaction, or a closed interval (decrementing b)"""
201 |
202 | def format_interval(x):
203 | if x[0] + 1 != x[1]:
204 | return f"{x[0]}-{x[1] - 1}"
205 | return str(x[0])
206 |
207 | interval_string = ":".join(map(format_interval, self.intervals))
208 | return f"{self.sid}:{interval_string}"
209 |
210 | def __repr__(self):
211 |         return f'<Gtid "{self}">'
212 |
213 | @property
214 | def encoded_length(self):
215 | return (
216 | 16
217 | + 8 # sid
218 | + 2 # n_intervals
219 | * 8 # stop/start
220 | * len(self.intervals) # stop/start mark encoded as int64
221 | )
222 |
223 | def encode(self):
224 | """Encode a Gtid in binary
225 | Bytes are in **little endian**.
226 |
227 | Format:
228 |
229 | - sid will be uncoded as hex-binary without the dashes as a [u8; 16]
230 | - size of the interval list as a u64
231 | - all the interval as a pair: (start: u64, end: u64).
232 |
233 | ## Diagram
234 |
235 | ```txt
236 | Alligned on u64 bit.
237 | +-+-+-+-+-+-+-+-+-+-+
238 | | sid [16;u8] |
239 | | |
240 | +-+-+-+-+-+-+-+-+-+-+
241 | | intervals_len u64 |
242 | +-+-+-+-+-+-+-+-+-+-+
243 | |start u64 <-+
244 | - - - - - - - - - - - + Repeated
245 | |stop u64 <-+ interval_len times
246 | - - - - - - - - - - -
247 | ```
248 | """
249 | buffer = b""
250 | # sid
251 | buffer += binascii.unhexlify(self.sid.replace("-", ""))
252 | # n_intervals
253 | buffer += struct.pack(" other.sid
314 | return self.intervals > other.intervals
315 |
316 | def __ge__(self, other):
317 | if other.sid != self.sid:
318 | return self.sid >= other.sid
319 | return self.intervals >= other.intervals
320 |
321 |
322 | class GtidSet(object):
323 | """Represents a set of Gtid"""
324 |
325 | def __init__(self, gtid_set):
326 | """
327 | Construct a GtidSet initial state depends of the nature of `gtid_set` param.
328 |
329 | params:
330 | - gtid_set:
331 | - None: then the GtidSet start empty
332 | - a set of Gtid either as a their textual representation separated by comma
333 | - A set or list of gtid
334 | - A GTID alone.
335 |
336 | Raises:
337 | - ValueError: if `gtid_set` is a string separated with comma, but with malformated Gtid.
338 | - Exception: if Gtid interval are either malformated or overlapping
339 | """
340 |
341 | def _to_gtid(element):
342 | if isinstance(element, Gtid):
343 | return element
344 | return Gtid(element.strip(" \n"))
345 |
346 | if not gtid_set:
347 | self.gtids = []
348 | elif isinstance(gtid_set, (list, set)):
349 | self.gtids = [_to_gtid(x) for x in gtid_set]
350 | else:
351 | self.gtids = [Gtid(x.strip(" \n")) for x in gtid_set.split(",")]
352 |
353 | def merge_gtid(self, gtid):
354 | """Insert a Gtid in current GtidSet."""
355 | new_gtids = []
356 | for existing in self.gtids:
357 | if existing.sid == gtid.sid:
358 | new_gtids.append(existing + gtid)
359 | else:
360 | new_gtids.append(existing)
361 | if gtid.sid not in (x.sid for x in new_gtids):
362 | new_gtids.append(gtid)
363 | self.gtids = new_gtids
364 |
365 | def __contains__(self, other):
366 | """
367 | Test if self contains other, could be a GtidSet or a Gtid.
368 |
369 | Raises:
370 | - NotImplementedError other is not a GtidSet neither a Gtid,
371 | please convert it first to one of them
372 | """
373 | if isinstance(other, GtidSet):
374 | return all(other_gtid in self.gtids for other_gtid in other.gtids)
375 | if isinstance(other, Gtid):
376 | return any(other in x for x in self.gtids)
377 | raise NotImplementedError
378 |
379 | def __add__(self, other):
380 | """
381 | Merge current instance with an other GtidSet or with a Gtid alone.
382 |
383 | Raises:
384 | - NotImplementedError other is not a GtidSet neither a Gtid,
385 | please convert it first to one of them
386 | """
387 | if isinstance(other, Gtid):
388 | new = GtidSet(self.gtids)
389 | new.merge_gtid(other)
390 | return new
391 |
392 | if isinstance(other, GtidSet):
393 | new = GtidSet(self.gtids)
394 | for gtid in other.gtids:
395 | new.merge_gtid(gtid)
396 | return new
397 |
398 | raise NotImplementedError
399 |
400 | def __str__(self):
401 | """
402 | Returns a comma separated string of gtids.
403 | """
404 | return ",".join(str(x) for x in self.gtids)
405 |
406 | def __repr__(self):
407 |         return "<GtidSet %r>" % self.gtids
408 |
409 | @property
410 | def encoded_length(self):
411 | return 8 + sum(x.encoded_length for x in self.gtids) # n_sids
412 |
413 | def encoded(self):
414 | """Encode a GtidSet in binary
415 | Bytes are in **little endian**.
416 |
417 | - `n_sid`: u64 is the number of Gtid to read
418 | - `Gtid`: `n_sid` * `Gtid_encoded_size` times.
419 | See`Gtid.encode` documentation for details.
420 |
421 | ```txt
422 | Alligned on u64 bit.
423 | +-+-+-+-+-+-+-+-+-+-+
424 | | n_gtid u64 |
425 | +-+-+-+-+-+-+-+-+-+-+
426 | | Gtid | - Repeated n_gtid times
427 | - - - - - - - - - - -
428 | ```
429 | """
430 | return b"" + (
431 | struct.pack(" bool:
46 | if type in [JSONB_TYPE_UINT16, JSONB_TYPE_INT16, JSONB_TYPE_LITERAL]:
47 | return True
48 | elif type in [JSONB_TYPE_INT32, JSONB_TYPE_UINT32]:
49 | return not is_small
50 | return False
51 |
52 |
53 | def parse_json(type: bytes, data: bytes):
54 | if type == JSONB_TYPE_SMALL_OBJECT:
55 | v = parse_json_object_or_array(data, True, True)
56 | elif type == JSONB_TYPE_LARGE_OBJECT:
57 | v = parse_json_object_or_array(data, False, True)
58 | elif type == JSONB_TYPE_SMALL_ARRAY:
59 | v = parse_json_object_or_array(data, True, False)
60 | elif type == JSONB_TYPE_LARGE_ARRAY:
61 | v = parse_json_object_or_array(data, False, False)
62 | elif type == JSONB_TYPE_LITERAL:
63 | v = parse_literal(data)
64 | elif type == JSONB_TYPE_INT16:
65 | v = parse_int16(data)
66 | elif type == JSONB_TYPE_UINT16:
67 | v = parse_uint16(data)
68 | elif type == JSONB_TYPE_INT32:
69 | v = parse_int32(data)
70 | elif type == JSONB_TYPE_UINT32:
71 | v = parse_uint32(data)
72 | elif type == JSONB_TYPE_INT64:
73 | v = parse_int64(data)
74 | elif type == JSONB_TYPE_UINT64:
75 | v = parse_uint64(data)
76 | elif type == JSONB_TYPE_DOUBLE:
77 | v = parse_double(data)
78 | elif type == JSONB_TYPE_STRING:
79 | length, n = decode_variable_length(data)
80 | v = parse_string(n, length, data)
81 | elif type == JSONB_TYPE_OPAQUE:
82 | v = parse_opaque(data)
83 | else:
84 | raise ValueError(f"Json type {type} is not handled")
85 | return v
86 |
87 |
88 | def parse_json_object_or_array(bytes, is_small, is_object):
89 | offset_size = JSONB_SMALL_OFFSET_SIZE if is_small else JSONB_LARGE_OFFSET_SIZE
90 | count = decode_count(bytes, is_small)
91 | size = decode_count(bytes[offset_size:], is_small)
92 | if is_small:
93 | key_entry_size = JSONB_KEY_ENTRY_SIZE_SMALL
94 | value_entry_size = JSONB_VALUE_ENTRY_SIZE_SMALL
95 | else:
96 | key_entry_size = JSONB_KEY_ENTRY_SIZE_LARGE
97 | value_entry_size = JSONB_VALUE_ENTRY_SIZE_LARGE
98 | if is_data_short(bytes, size):
99 | raise ValueError(
100 | "Before MySQL 5.7.22, json type generated column may have invalid value"
101 | )
102 |
103 | header_size = 2 * offset_size + count * value_entry_size
104 |
105 | if is_object:
106 | header_size += count * key_entry_size
107 |
108 | if header_size > size:
109 | raise ValueError("header size > size")
110 |
111 | keys = []
112 | if is_object:
113 | keys = []
114 | for i in range(count):
115 | entry_offset = 2 * offset_size + key_entry_size * i
116 | key_offset = decode_count(bytes[entry_offset:], is_small)
117 | key_length = decode_uint(bytes[entry_offset + offset_size :])
118 | keys.append(bytes[key_offset : key_offset + key_length])
119 |
120 | values = {}
121 | for i in range(count):
122 | entry_offset = 2 * offset_size + value_entry_size * i
123 | if is_object:
124 | entry_offset += key_entry_size * count
125 | json_type = bytes[entry_offset]
126 | if is_json_inline_value(json_type, is_small):
127 | values[i] = parse_json(
128 | json_type, bytes[entry_offset + 1 : entry_offset + value_entry_size]
129 | )
130 | continue
131 | value_offset = decode_count(bytes[entry_offset + 1 :], is_small)
132 | if is_data_short(bytes, value_offset):
133 | return None
134 | values[i] = parse_json(json_type, bytes[value_offset:])
135 | if not is_object:
136 | return list(values.values())
137 | out = {}
138 | for i in range(count):
139 | out[keys[i]] = values[i]
140 | return out
141 |
142 |
143 | def parse_literal(data: bytes):
144 | json_type = data[0]
145 | if json_type == JSONB_LITERAL_NULL:
146 | return None
147 | elif json_type == JSONB_LITERAL_TRUE:
148 | return True
149 | elif json_type == JSONB_LITERAL_FALSE:
150 | return False
151 |
152 | raise ValueError("NOT LITERAL TYPE")
153 |
154 |
155 | def parse_opaque(data: bytes):
156 | if is_data_short(data, 1):
157 | return None
158 | type_ = data[0]
159 | data = data[1:]
160 |
161 | length, n = decode_variable_length(data)
162 | data = data[n : n + length]
163 |
164 | if type_ in [FIELD_TYPE.NEWDECIMAL, FIELD_TYPE.DECIMAL]:
165 | return decode_decimal(data)
166 | elif type_ in [FIELD_TYPE.TIME, FIELD_TYPE.TIME2]:
167 | return decode_time(data)
168 | elif type_ in [FIELD_TYPE.DATE, FIELD_TYPE.DATETIME, FIELD_TYPE.DATETIME2]:
169 | return decode_datetime(data)
170 | else:
171 | return data.decode(errors="ignore")
172 |
173 |
174 | class BinLogPacketWrapper(object):
175 | """
176 | Bin Log Packet Wrapper. It uses an existing packet object, and wraps
177 | around it, exposing useful variables while still providing access
178 | to the original packet objects variables and methods.
179 | """
180 |
181 | __event_map = {
182 | # event
183 | constants.QUERY_EVENT: event.QueryEvent,
184 | constants.ROTATE_EVENT: event.RotateEvent,
185 | constants.FORMAT_DESCRIPTION_EVENT: event.FormatDescriptionEvent,
186 | constants.XID_EVENT: event.XidEvent,
187 | constants.INTVAR_EVENT: event.IntvarEvent,
188 | constants.GTID_LOG_EVENT: event.GtidEvent,
189 | constants.PREVIOUS_GTIDS_LOG_EVENT: event.PreviousGtidsEvent,
190 | constants.STOP_EVENT: event.StopEvent,
191 | constants.BEGIN_LOAD_QUERY_EVENT: event.BeginLoadQueryEvent,
192 | constants.EXECUTE_LOAD_QUERY_EVENT: event.ExecuteLoadQueryEvent,
193 | constants.HEARTBEAT_LOG_EVENT: event.HeartbeatLogEvent,
194 | constants.XA_PREPARE_EVENT: event.XAPrepareEvent,
195 | constants.ROWS_QUERY_LOG_EVENT: event.RowsQueryLogEvent,
196 | constants.RAND_EVENT: event.RandEvent,
197 | constants.USER_VAR_EVENT: event.UserVarEvent,
198 | # row_event
199 | constants.UPDATE_ROWS_EVENT_V1: row_event.UpdateRowsEvent,
200 | constants.WRITE_ROWS_EVENT_V1: row_event.WriteRowsEvent,
201 | constants.DELETE_ROWS_EVENT_V1: row_event.DeleteRowsEvent,
202 | constants.UPDATE_ROWS_EVENT_V2: row_event.UpdateRowsEvent,
203 | constants.WRITE_ROWS_EVENT_V2: row_event.WriteRowsEvent,
204 | constants.DELETE_ROWS_EVENT_V2: row_event.DeleteRowsEvent,
205 | constants.TABLE_MAP_EVENT: row_event.TableMapEvent,
206 | # 5.6 GTID enabled replication events
207 | constants.ANONYMOUS_GTID_LOG_EVENT: event.NotImplementedEvent,
208 | # MariaDB GTID
209 | constants.MARIADB_ANNOTATE_ROWS_EVENT: event.MariadbAnnotateRowsEvent,
210 | constants.MARIADB_BINLOG_CHECKPOINT_EVENT: event.MariadbBinLogCheckPointEvent,
211 | constants.MARIADB_GTID_EVENT: event.MariadbGtidEvent,
212 | constants.MARIADB_GTID_GTID_LIST_EVENT: event.MariadbGtidListEvent,
213 | constants.MARIADB_START_ENCRYPTION_EVENT: event.MariadbStartEncryptionEvent,
214 | }
215 |
216 | def __init__(
217 | self,
218 | from_packet,
219 | table_map,
220 | ctl_connection,
221 | mysql_version,
222 | use_checksum,
223 | allowed_events,
224 | only_tables,
225 | ignored_tables,
226 | only_schemas,
227 | ignored_schemas,
228 | freeze_schema,
229 | ignore_decode_errors,
230 | verify_checksum,
231 | optional_meta_data,
232 | ):
233 | # -1 because we ignore the ok byte
234 | self.read_bytes = 0
235 | # Used when we want to override a value in the data buffer
236 | self.__data_buffer = b""
237 |
238 | self.packet = from_packet
239 | self.charset = ctl_connection.charset
240 |
241 | # OK value
242 | # timestamp
243 | # event_type
244 | # server_id
245 | # log_pos
246 | # flags
247 | unpack = struct.unpack(" 0:
293 | data = self.__data_buffer[:size]
294 | self.__data_buffer = self.__data_buffer[size:]
295 | if len(data) == size:
296 | return data
297 | else:
298 | return data + self.packet.read(size - len(data))
299 | return self.packet.read(size)
300 |
301 | def unread(self, data):
302 | """Push again data in data buffer. It's use when you want
303 | to extract a bit from a value a let the rest of the code normally
304 | read the datas"""
305 | self.read_bytes -= len(data)
306 | self.__data_buffer += data
307 |
308 | def advance(self, size):
309 | size = int(size)
310 | self.read_bytes += size
311 | buffer_len = len(self.__data_buffer)
312 | if buffer_len > 0:
313 | self.__data_buffer = self.__data_buffer[size:]
314 | if size > buffer_len:
315 | self.packet.advance(size - buffer_len)
316 | else:
317 | self.packet.advance(size)
318 |
319 | def read_length_coded_binary(self):
320 | """Read a 'Length Coded Binary' number from the data buffer.
321 |
322 | Length coded numbers can be anywhere from 1 to 9 bytes depending
323 | on the value of the first byte.
324 |
325 | From PyMYSQL source code
326 | """
327 | c = struct.unpack("!B", self.read(1))[0]
328 | if c == NULL_COLUMN:
329 | return None
330 | if c < UNSIGNED_CHAR_COLUMN:
331 | return c
332 | elif c == UNSIGNED_SHORT_COLUMN:
333 | return self.unpack_uint16(self.read(UNSIGNED_SHORT_LENGTH))
334 | elif c == UNSIGNED_INT24_COLUMN:
335 | return self.unpack_int24(self.read(UNSIGNED_INT24_LENGTH))
336 | elif c == UNSIGNED_INT64_COLUMN:
337 | return self.unpack_int64(self.read(UNSIGNED_INT64_LENGTH))
338 |
339 | def read_length_coded_string(self):
340 | """Read a 'Length Coded String' from the data buffer.
341 |
342 | A 'Length Coded String' consists first of a length coded
343 | (unsigned, positive) integer represented in 1-9 bytes followed by
344 | that many bytes of binary data. (For example "cat" would be "3cat".)
345 |
346 | From PyMYSQL source code
347 | """
348 | length = self.read_length_coded_binary()
349 | if length is None:
350 | return None
351 | return self.read(length).decode()
352 |
353 | def __getattr__(self, key):
354 | if hasattr(self.packet, key):
355 | return getattr(self.packet, key)
356 |
357 | raise AttributeError(f"{self.__class__} instance has no attribute '{key}'")
358 |
359 | def read_int_be_by_size(self, size):
360 | """Read a big endian integer values based on byte number"""
361 | if size == 1:
362 | return struct.unpack(">b", self.read(size))[0]
363 | elif size == 2:
364 | return struct.unpack(">h", self.read(size))[0]
365 | elif size == 3:
366 | return self.read_int24_be()
367 | elif size == 4:
368 | return struct.unpack(">i", self.read(size))[0]
369 | elif size == 5:
370 | return self.read_int40_be()
371 | elif size == 8:
372 |             return struct.unpack(">q", self.read(size))[0]
373 |
374 | def read_uint_by_size(self, size):
375 | """Read a little endian integer values based on byte number"""
376 | if size == 1:
377 | return self.read_uint8()
378 | elif size == 2:
379 | return self.read_uint16()
380 | elif size == 3:
381 | return self.read_uint24()
382 | elif size == 4:
383 | return self.read_uint32()
384 | elif size == 5:
385 | return self.read_uint40()
386 | elif size == 6:
387 | return self.read_uint48()
388 | elif size == 7:
389 | return self.read_uint56()
390 | elif size == 8:
391 | return self.read_uint64()
392 |
393 | def read_length_coded_pascal_string(self, size):
394 | """Read a string with length coded using pascal style.
395 | The string start by the size of the string
396 | """
397 | length = self.read_uint_by_size(size)
398 | return self.read(length)
399 |
400 | def read_variable_length_string(self):
401 | """Read a variable length string where the first 1-5 bytes stores the
402 | length of the string.
403 |
404 | For each byte, the first bit being high indicates another byte must be
405 | read.
406 | """
407 | byte = 0x80
408 | length = 0
409 | bits_read = 0
410 | while byte & 0x80 != 0:
411 | byte = struct.unpack("!B", self.read(1))[0]
412 | length = length | ((byte & 0x7F) << bits_read)
413 | bits_read = bits_read + 7
414 | return self.read(length)
415 |
416 | def read_int24(self):
417 | a, b, c = struct.unpack("BBB", self.read(3))
418 | res = a | (b << 8) | (c << 16)
419 | if res >= 0x800000:
420 | res -= 0x1000000
421 | return res
422 |
423 | def read_int24_be(self):
424 | a, b, c = struct.unpack("BBB", self.read(3))
425 | res = (a << 16) | (b << 8) | c
426 | if res >= 0x800000:
427 | res -= 0x1000000
428 | return res
429 |
430 | def read_uint8(self):
431 | return struct.unpack("IB", self.read(5))
455 | return b + (a << 8)
456 |
457 | def read_uint48(self):
458 | a, b, c = struct.unpack("= 5.6:
69 | return True
70 | return False
71 |
72 | def isMySQL57(self):
73 | version = float(self.getMySQLVersion().rsplit(".", 1)[0])
74 | return version == 5.7
75 |
76 | def isMySQL80AndMore(self):
77 | version = float(self.getMySQLVersion().rsplit(".", 1)[0])
78 | return version >= 8.0
79 |
80 | def isMySQL8014AndMore(self):
81 | version = float(self.getMySQLVersion().rsplit(".", 1)[0])
82 | version_detail = int(self.getMySQLVersion().rsplit(".", 1)[1])
83 | if version > 8.0:
84 | return True
85 | return version == 8.0 and version_detail >= 14
86 |
87 | def isMariaDB(self):
88 | if self.__is_mariaDB is None:
89 | self.__is_mariaDB = (
90 | "MariaDB" in self.execute("SELECT VERSION()").fetchone()[0]
91 | )
92 | return self.__is_mariaDB
93 |
94 | @property
95 | def supportsGTID(self):
96 | if not self.isMySQL56AndMore():
97 | return False
98 | return self.execute("SELECT @@global.gtid_mode ").fetchone()[0] == "ON"
99 |
100 | def connect_conn_control(self, db):
101 | if self.conn_control is not None:
102 | self.conn_control.close()
103 | self.conn_control = pymysql.connect(**db)
104 |
105 | def tearDown(self):
106 | self.conn_control.close()
107 | self.conn_control = None
108 | self.stream.close()
109 | self.stream = None
110 |
111 | def execute(self, query):
112 | c = self.conn_control.cursor()
113 | c.execute(query)
114 | return c
115 |
116 | def execute_with_args(self, query, args):
117 | c = self.conn_control.cursor()
118 | c.execute(query, args)
119 | return c
120 |
121 | def resetBinLog(self):
122 | self.execute("RESET MASTER")
123 | if self.stream is not None:
124 | self.stream.close()
125 | self.stream = BinLogStreamReader(
126 | self.database, server_id=1024, ignored_events=self.ignoredEvents()
127 | )
128 |
129 | def set_sql_mode(self):
130 | """set sql_mode to test with same sql_mode (mysql 5.7 sql_mode default is changed)"""
131 | version = float(self.getMySQLVersion().rsplit(".", 1)[0])
132 | if version == 5.7:
133 | self.execute("SET @@sql_mode='NO_ENGINE_SUBSTITUTION'")
134 |
135 | def bin_log_format(self):
136 | query = "SELECT @@binlog_format"
137 | cursor = self.execute(query)
138 | result = cursor.fetchone()
139 | return result[0]
140 |
141 | def bin_log_basename(self):
142 | cursor = self.execute("SELECT @@log_bin_basename")
143 | bin_log_basename = cursor.fetchone()[0]
144 | bin_log_basename = bin_log_basename.split("/")[-1]
145 | return bin_log_basename
146 |
--------------------------------------------------------------------------------
/pymysqlreplication/tests/benchmark.py:
--------------------------------------------------------------------------------
1 | # This is a sample script in order to make benchmark
2 | # on library speed.
3 | #
4 | #
5 |
6 | import pymysql
7 | import time
8 | import os
9 | from pymysqlreplication import BinLogStreamReader
10 | from pymysqlreplication.row_event import *
11 |
12 |
13 | def execute(con, query):
14 | c = con.cursor()
15 | c.execute(query)
16 | return c
17 |
18 |
19 | def consume_events():
20 | stream = BinLogStreamReader(
21 | connection_settings=database,
22 | server_id=3,
23 | resume_stream=False,
24 | blocking=True,
25 | only_events=[UpdateRowsEvent],
26 | only_tables=["test"],
27 | )
28 |     start = time.perf_counter()
29 | i = 0.0
30 | for binlogevent in stream:
31 | i += 1.0
32 | if i % 1000 == 0:
33 |             print(f"{i / (time.perf_counter() - start)} event by seconds ({i} total)")
34 |
35 | stream.close()
36 |
37 |
38 | database = {
39 | "host": "localhost",
40 | "user": "root",
41 | "passwd": "",
42 | "use_unicode": True,
43 | "charset": "utf8",
44 | "db": "pymysqlreplication_test",
45 | }
46 |
47 | conn = pymysql.connect(**database)
48 |
49 | execute(conn, "DROP DATABASE IF EXISTS pymysqlreplication_test")
50 | execute(conn, "CREATE DATABASE pymysqlreplication_test")
51 | conn = pymysql.connect(**database)
52 | execute(conn, "CREATE TABLE test (i INT) ENGINE = MEMORY")
53 | execute(conn, "INSERT INTO test VALUES(1)")
54 | execute(conn, "CREATE TABLE test2 (i INT) ENGINE = MEMORY")
55 | execute(conn, "INSERT INTO test2 VALUES(1)")
56 | execute(conn, "RESET MASTER")
57 |
58 |
59 | if os.fork() != 0:
60 | print("Start insert data")
61 | while True:
62 | execute(conn, "UPDATE test SET i = i + 1;")
63 | execute(conn, "UPDATE test2 SET i = i + 1;")
64 | else:
65 | consume_events()
66 | # cProfile.run('consume_events()')
67 |
--------------------------------------------------------------------------------
/pymysqlreplication/tests/binlogfilereader.py:
--------------------------------------------------------------------------------
1 | """Read binlog files"""
2 | import struct
3 |
4 | from pymysqlreplication import constants
5 | from pymysqlreplication.event import FormatDescriptionEvent
6 | from pymysqlreplication.event import QueryEvent
7 | from pymysqlreplication.event import RotateEvent
8 | from pymysqlreplication.event import XidEvent
9 | from pymysqlreplication.row_event import TableMapEvent
10 | from pymysqlreplication.row_event import WriteRowsEvent
11 |
12 |
13 | class SimpleBinLogFileReader(object):
14 | """Read binlog files"""
15 |
16 | _expected_magic = b"\xfebin"
17 |
18 | def __init__(self, file_path, only_events=None):
19 | self._current_event = None
20 | self._file = None
21 | self._file_path = file_path
22 | self._only_events = only_events
23 | self._pos = None
24 |
25 | def fetchone(self):
26 | """Fetch one record from the binlog file"""
27 | if self._pos is None or self._pos < 4:
28 | self._read_magic()
29 | while True:
30 | event = self._read_event()
31 | self._current_event = event
32 | if event is None:
33 | return None
34 | if self._filter_events(event):
35 | return event
36 |
37 | def truncatebinlog(self):
38 | """Truncate the binlog file at the current event"""
39 | if self._current_event is not None:
40 | self._file.truncate(self._current_event.pos)
41 |
42 | def _filter_events(self, event):
43 | """Return True if an event can be returned"""
44 | # It would be good if we could reuse the __event_map in
45 | # packet.BinLogPacketWrapper.
46 | event_type = {
47 | constants.QUERY_EVENT: QueryEvent,
48 | constants.ROTATE_EVENT: RotateEvent,
49 | constants.FORMAT_DESCRIPTION_EVENT: FormatDescriptionEvent,
50 | constants.XID_EVENT: XidEvent,
51 | constants.TABLE_MAP_EVENT: TableMapEvent,
52 | constants.WRITE_ROWS_EVENT_V2: WriteRowsEvent,
53 | }.get(event.event_type)
54 | return event_type in self._only_events
55 |
56 | def _open_file(self):
57 | """Open the file at ``self._file_path``"""
58 | if self._file is None:
59 | self._file = open(self._file_path, "rb+")
60 | self._pos = self._file.tell()
61 | assert self._pos == 0
62 |
63 | def _read_event(self):
64 | """Read an event from the binlog file"""
65 | # Assuming a binlog version > 1
66 | headerlength = 19
67 | header = self._file.read(headerlength)
68 | event_pos = self._pos
69 | self._pos += len(header)
70 | if len(header) == 0:
71 | return None
72 | event = SimpleBinLogEvent(header)
73 | event.set_pos(event_pos)
74 | if event.event_size < headerlength:
75 | messagefmt = "Event size {0} is too small"
76 | message = messagefmt.format(event.event_size)
77 | raise EventSizeTooSmallError(message)
78 | else:
79 | body = self._file.read(event.event_size - headerlength)
80 | self._pos += len(body)
81 | event.set_body(body)
82 | return event
83 |
84 | def _read_magic(self):
85 | """Read the first four *magic* bytes of the binlog file"""
86 | self._open_file()
87 | if self._pos == 0:
88 | magic = self._file.read(4)
89 | if magic == self._expected_magic:
90 | self._pos += len(magic)
91 | else:
92 | messagefmt = "Magic bytes {0!r} did not match expected {1!r}"
93 | message = messagefmt.format(magic, self._expected_magic)
94 | raise BadMagicBytesError(message)
95 |
def __iter__(self):
        """Yield events from ``fetchone`` until it returns ``None``."""
        while True:
            event = self.fetchone()
            if event is None:
                return
            yield event
98 |
99 | def __repr__(self):
100 | cls = self.__class__
101 | mod = cls.__module__
102 | name = cls.__name__
103 | only = [type(x).__name__ for x in self._only_events]
104 | return f"<{mod}.{name}(file_path={self._file_path}, only_events={only})>"
105 |
106 |
107 | # pylint: disable=too-many-instance-attributes
108 | class SimpleBinLogEvent(object):
109 | """An event from a binlog file"""
110 |
111 | def __init__(self, header):
112 | """Initialize the Event with the event header"""
113 | unpacked = struct.unpack(""
137 |
138 |
class BadMagicBytesError(Exception):
    """The binlog file magic bytes did not match the specification.

    Raised by ``SimpleBinLogFileReader._read_magic`` when the first four
    bytes of the file differ from the expected binlog signature.
    """
142 |
class EventSizeTooSmallError(Exception):
    """The event size was smaller than the length of the event header.

    Raised by ``SimpleBinLogFileReader._read_event`` when an event
    declares a total size below the 19-byte header length.
    """
145 |
--------------------------------------------------------------------------------
/pymysqlreplication/tests/config.json:
--------------------------------------------------------------------------------
1 | {
2 | "mysql-5": {
3 | "host": "localhost",
4 | "user": "root",
5 | "passwd": "",
6 | "port": 3306,
7 | "use_unicode": true,
8 | "charset": "utf8",
9 | "db": "pymysqlreplication_test"
10 | },
11 | "mysql-5-ctl": {
12 | "host": "localhost",
13 | "user": "root",
14 | "passwd": "",
15 | "port": 3307,
16 | "use_unicode": true,
17 | "charset": "utf8",
18 | "db": "pymysqlreplication_test"
19 | },
20 | "mariadb-10": {
21 | "host": "localhost",
22 | "user": "root",
23 | "passwd": "",
24 | "port": 3308,
25 | "use_unicode": true,
26 | "charset": "utf8",
27 | "db": "pymysqlreplication_test"
28 | },
29 | "mysql-8": {
30 | "host": "localhost",
31 | "user": "root",
32 | "passwd": "",
33 | "port": 3309,
34 | "use_unicode": true,
35 | "charset": "utf8",
36 | "db": "pymysqlreplication_test"
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/pymysqlreplication/tests/conftest.py:
--------------------------------------------------------------------------------
1 | import pytest
2 |
3 |
def pytest_addoption(parser):
    """Register the ``--db`` command-line option (default: ``mysql-5``)."""
    parser.addoption("--db", action="store", default="mysql-5")
6 |
7 |
@pytest.fixture
def get_db(request):
    """Fixture returning the value passed via the ``--db`` option."""
    return request.config.getoption("--db")
11 |
--------------------------------------------------------------------------------
/pymysqlreplication/tests/test_abnormal.py:
--------------------------------------------------------------------------------
1 | """Test abnormal conditions, such as caused by a MySQL crash
2 | """
3 | import os.path
4 |
5 | from pymysqlreplication.tests import base
6 | from pymysqlreplication.tests.binlogfilereader import SimpleBinLogFileReader
7 | from pymysqlreplication import BinLogStreamReader
8 | from pymysqlreplication.event import GtidEvent
9 | from pymysqlreplication.event import RotateEvent
10 |
11 |
class TestAbnormalBinLogStreamReader(base.PyMySQLReplicationTestCase):
    """Test abnormal condition handling in the BinLogStreamReader"""

    @staticmethod
    def ignored_events():
        """Events the BinLogStreamReader should ignore"""
        return [GtidEvent]

    def test_no_trailing_rotate_event(self):
        """A missing RotateEvent and skip_to_timestamp cause corruption

        This test shows that a binlog file which lacks the trailing RotateEvent
        and the use of the ``skip_to_timestamp`` argument together can cause
        the table_map to become corrupt. The trailing RotateEvent has a
        timestamp, but may be lost if the server crashes. The leading
        RotateEvent in the next binlog file always has a timestamp of 0, thus
        is discarded when ``skip_to_timestamp`` is greater than zero.
        """
        # Populate the first binlog with a table definition and one row.
        self.execute(
            "CREATE TABLE test (id INT NOT NULL AUTO_INCREMENT, "
            "data VARCHAR (50) NOT NULL, PRIMARY KEY(id))"
        )
        self.execute("SET AUTOCOMMIT = 0")
        self.execute('INSERT INTO test(id, data) VALUES (1, "Hello")')
        self.execute("COMMIT")
        # Capture server time, then rotate to a second binlog and write
        # one more row into it.
        timestamp = self.execute("SELECT UNIX_TIMESTAMP()").fetchone()[0]
        self.execute("FLUSH BINARY LOGS")
        self.execute('INSERT INTO test(id, data) VALUES (2, "Hi")')
        self.stream.close()
        # Simulate a crash: drop the RotateEvent that normally terminates
        # the first binlog file.
        self._remove_trailing_rotate_event_from_first_binlog()

        binlog = self.execute("SHOW BINARY LOGS").fetchone()[0]

        # Re-read from the start of the first binlog, skipping everything
        # older than the captured timestamp.
        self.stream = BinLogStreamReader(
            self.database,
            server_id=1024,
            log_pos=4,
            log_file=binlog,
            skip_to_timestamp=timestamp,
            ignored_events=self.ignored_events(),
        )
        # Drain the stream so all surviving events are processed.
        for _ in self.stream:
            pass
        # The table_map should be empty because of the binlog being rotated.
        self.assertEqual({}, self.stream.table_map)

    def _remove_trailing_rotate_event_from_first_binlog(self):
        """Remove the trailing RotateEvent from the first binlog

        According to the MySQL Internals Manual, a RotateEvent will be added to
        the end of a binlog when the binlog is rotated. This may not happen if
        the server crashes, for example.

        This method removes the trailing RotateEvent to verify that the library
        properly handles this case.
        """
        datadir = self.execute("SHOW VARIABLES LIKE 'datadir'").fetchone()[1]
        binlog = self.execute("SHOW BINARY LOGS").fetchone()[0]
        binlogpath = os.path.join(datadir, binlog)

        # Truncate the file at the position of the first RotateEvent the
        # stand-alone file reader yields (no server connection needed).
        reader = SimpleBinLogFileReader(binlogpath, only_events=[RotateEvent])
        for _ in reader:
            reader.truncatebinlog()
            break
76 |
--------------------------------------------------------------------------------
/pymysqlreplication/tests/test_data_objects.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from pymysqlreplication.column import Column
3 | from pymysqlreplication.table import Table
4 | from pymysqlreplication.event import GtidEvent
5 |
6 | from pymysqlreplication.tests import base
7 |
8 | __all__ = ["TestDataObjects"]
9 |
10 |
class TestDataObjects(base.PyMySQLReplicationTestCase):
    """Round-trip serialization checks for Column and Table objects."""

    def ignoredEvents(self):
        """GTID events are irrelevant to these tests; skip them."""
        return [GtidEvent]

    def test_column_serializable(self):
        """A Column survives a serializable_data() round trip."""
        column = Column(1, None)
        data = column.serializable_data()
        self.assertIn("type", data)
        self.assertEqual(column, Column(**data))

    def test_table(self):
        """A Table survives a serializable_data() round trip."""
        table = Table(1, "test_schema", "test_table", [], [])
        data = table.serializable_data()
        self.assertIn("table_id", data)
        self.assertIn("schema", data)
        self.assertIn("table", data)
        self.assertIn("columns", data)
        self.assertEqual(table, Table(**data))
32 |
33 |
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
36 |
--------------------------------------------------------------------------------
/pymysqlreplication/util/__init__.py:
--------------------------------------------------------------------------------
1 | from .bytes import *
2 |
--------------------------------------------------------------------------------
/pymysqlreplication/util/bytes.py:
--------------------------------------------------------------------------------
1 | import datetime
2 | import decimal
3 | import struct
4 | import sys
5 |
6 |
def is_data_short(data: bytes, expected: int) -> bool:
    """Return True when ``data`` holds fewer than ``expected`` bytes."""
    return len(data) < expected
11 |
12 |
def decode_count(data: bytes, is_small: bool):
    """Decode a count field: 16-bit when ``is_small``, else 32-bit."""
    parser = parse_uint16 if is_small else parse_uint32
    return parser(data)
18 |
19 |
def decode_uint(data: bytes):
    """Decode a 16-bit unsigned int; 0 if fewer than 2 bytes available."""
    return 0 if is_data_short(data, 2) else parse_uint16(data)
24 |
25 |
def decode_variable_length(data: bytes):
    """Decode a 7-bits-per-byte variable-length integer (max 5 bytes).

    Returns ``(value, bytes_consumed)``, or ``(0, 0)`` when the encoding
    does not terminate within the available/maximum bytes or the decoded
    value would exceed ``sys.maxsize - 1``.
    """
    limit = min(5, len(data))
    value = 0
    for index in range(limit):
        byte = data[index]
        # Low 7 bits carry payload, little-endian group order.
        value |= (byte & 0x7F) << (7 * index)
        if not byte & 0x80:
            # High bit clear: final byte of the encoding.
            if value > sys.maxsize - 1:
                return 0, 0
            return int(value), index + 1
    return 0, 0
42 |
43 |
def parse_decimal_from_bytes(
    raw_decimal: bytes, precision: int, decimals: int
) -> decimal.Decimal:
    """
    Parse decimal from bytes.

    Digits are stored in groups of up to 9 decimal digits, each full
    group packed into 4 bytes and any partial group into 1-4 bytes
    (``compressed_bytes``); integral and fractional parts are handled
    separately, and the sign lives in the high bit of the first byte.
    NOTE(review): assumed to mirror MySQL's on-disk DECIMAL packing —
    confirm against the MySQL internals documentation.
    """
    digits_per_integer = 9
    # Bytes needed to hold a partial group of 0..9 decimal digits.
    compressed_bytes = [0, 1, 1, 2, 2, 3, 3, 4, 4, 4]
    integral = precision - decimals

    # Split each side into full 9-digit groups plus a partial remainder.
    uncomp_integral, comp_integral = divmod(integral, digits_per_integer)
    uncomp_fractional, comp_fractional = divmod(decimals, digits_per_integer)

    # High bit of the first byte set means non-negative.
    res = "-" if not raw_decimal[0] & 0x80 else ""
    # mask = -1 flips every byte below to undo the negative encoding.
    mask = -1 if res == "-" else 0
    # Strip the sign bit before decoding digit groups.
    raw_decimal = bytearray([raw_decimal[0] ^ 0x80]) + raw_decimal[1:]

    def decode_decimal_decompress_value(comp_indx, data, mask):
        # Decode a partial group of `comp_indx` digits; returns
        # (bytes_consumed, value), or (0, 0) when there is no partial group.
        size = compressed_bytes[comp_indx]
        if size > 0:
            databuff = bytearray(data[:size])
            for i in range(size):
                databuff[i] = (databuff[i] ^ mask) & 0xFF
            return size, int.from_bytes(databuff, byteorder="big")
        return 0, 0

    # Leading partial integral group (most significant digits).
    pointer, value = decode_decimal_decompress_value(comp_integral, raw_decimal, mask)
    res += str(value)

    # Full 9-digit integral groups: 4 bytes each, big-endian signed.
    for _ in range(uncomp_integral):
        value = struct.unpack(">i", raw_decimal[pointer : pointer + 4])[0] ^ mask
        res += f"{value:09}"
        pointer += 4

    res += "."

    # Full 9-digit fractional groups.
    for _ in range(uncomp_fractional):
        value = struct.unpack(">i", raw_decimal[pointer : pointer + 4])[0] ^ mask
        res += f"{value:09}"
        pointer += 4

    # Trailing partial fractional group, zero-padded to its digit width.
    size, value = decode_decimal_decompress_value(
        comp_fractional, raw_decimal[pointer:], mask
    )
    if size > 0:
        res += f"{value:0{comp_fractional}d}"
    return decimal.Decimal(res)
91 |
92 |
def decode_decimal(data: bytes):
    """Decode a decimal value prefixed by its metadata.

    Layout: ``data[0]`` = precision, ``data[1]`` = scale (decimals),
    ``data[2:]`` = the packed decimal payload.
    """
    return parse_decimal_from_bytes(data[2:], data[0], data[1])
95 |
96 |
def decode_time(data: bytes):
    """Decode a packed 64-bit time value into ``datetime.time``.

    Bit layout of the integer part (value >> 24): hour in the top 10
    bits, minute in the next 6, second in the low 6; the low 24 bits of
    the full value are the fractional part. NOTE(review): the sign is
    discarded (negative values are negated) and the fraction is passed
    straight through as microseconds — confirm against the encoder.
    """
    packed = parse_int64(data)

    if packed == 0:
        return datetime.time(hour=0, minute=0, second=0)

    packed = abs(packed)
    integer_part = packed >> 24
    return datetime.time(
        hour=(integer_part >> 12) % (1 << 10),
        minute=(integer_part >> 6) % (1 << 6),
        second=integer_part % (1 << 6),
        microsecond=packed % (1 << 24),
    )
111 |
112 |
def decode_datetime(data):
    """Decode a packed 64-bit datetime value into ``datetime.datetime``.

    The value 0 cannot be represented as a ``datetime``, so the sentinel
    string ``"0000-00-00 00:00:00"`` is returned instead. Bit layout:
    the top 40 bits (value >> 24) carry year-month in base 13, day in
    5 bits, then hour/minute/second in 17 bits; the low 24 bits are the
    fractional part (used as microseconds).
    """
    packed = parse_int64(data)

    if packed == 0:
        # datetime parse Error
        return "0000-00-00 00:00:00"

    packed = abs(packed)

    int_part = packed >> 24
    ymd = int_part >> 17
    ym = ymd >> 5
    hms = int_part % (1 << 17)

    return datetime.datetime(
        year=ym // 13,
        month=ym % 13,
        day=ymd % (1 << 5),
        hour=hms >> 12,
        minute=(hms >> 6) % (1 << 6),
        second=hms % (1 << 6),
        microsecond=packed % (1 << 24),
    )
145 |
146 |
147 | def parse_int16(data: bytes):
148 | return struct.unpack("=1.1.0"],
57 | )
58 |
--------------------------------------------------------------------------------
/test.Dockerfile:
--------------------------------------------------------------------------------
# Test image: installs pymysqlreplication plus the client tooling needed
# by the integration test-suite.
ARG BASE_IMAGE=python:3.11-alpine
FROM ${BASE_IMAGE}

COPY .mariadb .mariadb
COPY pymysqlreplication pymysqlreplication
COPY README.md README.md
COPY setup.py setup.py

# bind-tools (dig/nslookup) and the mysql client in a single layer;
# --no-cache keeps the apk index out of the final image.
RUN apk add --no-cache bind-tools mysql-client
RUN pip install . && pip install pytest

# Database connection coordinates for each flavour under test; values
# are expected to be supplied as build args (e.g. by docker-compose).
# Note: ENV uses the key=value form; the space-separated legacy form is
# discouraged by the Dockerfile reference.
ARG MYSQL_5_7
ENV MYSQL_5_7=${MYSQL_5_7}

ARG MYSQL_5_7_CTL
ENV MYSQL_5_7_CTL=${MYSQL_5_7_CTL}

ARG MYSQL_5_7_CTL_PORT
ENV MYSQL_5_7_CTL_PORT=${MYSQL_5_7_CTL_PORT}

ARG MYSQL_8_0
ENV MYSQL_8_0=${MYSQL_8_0}

ARG MYSQL_8_0_PORT
ENV MYSQL_8_0_PORT=${MYSQL_8_0_PORT}

ARG MARIADB_10_6
ENV MARIADB_10_6=${MARIADB_10_6}

ARG MARIADB_10_6_PORT
ENV MARIADB_10_6_PORT=${MARIADB_10_6_PORT}
33 |
--------------------------------------------------------------------------------