├── .coveragerc ├── .github └── workflows │ └── tests.yaml ├── .gitignore ├── LICENSE.txt ├── README.md ├── appveyor.yml ├── benchmarks ├── benchmarks.py ├── testobj.py └── testobj_delay.py ├── docs ├── Makefile ├── make.bat └── source │ ├── batteries.rst │ ├── conf.py │ ├── index.rst │ └── pysyncobj.rst ├── examples ├── counter.py ├── kvstorage.py ├── kvstorage_http.py └── lock.py ├── pysyncobj ├── __init__.py ├── atomic_replace.py ├── batteries.py ├── config.py ├── dns_resolver.py ├── encryptor.py ├── fast_queue.py ├── journal.py ├── monotonic.py ├── node.py ├── pickle.py ├── pipe_notifier.py ├── poller.py ├── serializer.py ├── syncobj.py ├── syncobj_admin.py ├── tcp_connection.py ├── tcp_server.py ├── transport.py ├── utility.py ├── version.py └── win_inet_pton.py ├── setup.cfg ├── setup.py ├── syncobj_admin.py ├── test_syncobj.py └── test_zerodowntime ├── README.md ├── proc.py └── test.py /.coveragerc: -------------------------------------------------------------------------------- 1 | [report] 2 | 3 | exclude_lines = 4 | # Have to re-enable the standard pragma 5 | pragma: no cover 6 | # Don't complain if tests don't hit defensive assertion code: 7 | raise NotImplementedError 8 | 9 | show_missing = 1 10 | 11 | [run] 12 | omit = 13 | pysyncobj/win_inet_pton.py 14 | -------------------------------------------------------------------------------- /.github/workflows/tests.yaml: -------------------------------------------------------------------------------- 1 | name: Tests 2 | 3 | on: 4 | pull_request: 5 | push: 6 | branches: 7 | - master 8 | tags: 9 | - '*' 10 | 11 | jobs: 12 | run_tests: 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v3 16 | - name: run_tests 17 | run: > 18 | ls -la && 19 | python3 -m pip install -U pytest && 20 | python3 -m pytest -v -s test_syncobj.py 21 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | .idea/ 3 | MANIFEST 4 | dist/ 5 | *.bak 6 | *.bin 7 | build/ 8 | docs/build* 9 | .DS_Store 10 | .cache/ 11 | pysyncobj.egg-info/ 12 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Filipp Ozinov 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # PySyncObj 2 | 3 | [![Build Status][tests-image]][tests] [![Windows Build Status][appveyor-image]][appveyor] [![Coverage Status][coverage-image]][coverage] [![Release][release-image]][releases] [![License][license-image]][license] [![gitter][gitter-image]][gitter] [![docs][docs-image]][docs] 4 | 5 | [tests-image]: https://github.com/bakwc/PySyncObj/actions/workflows/tests.yaml/badge.svg 6 | [tests]: https://github.com/bakwc/PySyncObj/actions/workflows/tests.yaml 7 | 8 | [appveyor-image]: https://ci.appveyor.com/api/projects/status/github/bakwc/pysyncobj?branch=master&svg=true 9 | [appveyor]: https://ci.appveyor.com/project/bakwc/pysyncobj 10 | 11 | [coverage-image]: https://coveralls.io/repos/github/bakwc/PySyncObj/badge.svg?branch=master 12 | [coverage]: https://coveralls.io/github/bakwc/PySyncObj?branch=master 13 | 14 | [release-image]: https://img.shields.io/badge/release-0.3.14-blue.svg?style=flat 15 | [releases]: https://github.com/bakwc/PySyncObj/releases 16 | 17 | [license-image]: https://img.shields.io/badge/license-MIT-blue.svg?style=flat 18 | [license]: LICENSE.txt 19 | 20 | [gitter-image]: https://badges.gitter.im/bakwc/PySyncObj.svg 21 | [gitter]: https://gitter.im/bakwc/PySyncObj?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge 22 | 23 | [docs-image]: https://readthedocs.org/projects/pysyncobj/badge/?version=latest 24 | [docs]: http://pysyncobj.readthedocs.io/en/latest/ 25 | 26 | PySyncObj is a python library for building fault-tolerant distributed systems. It provides the ability to replicate your application data between multiple servers. It has following features: 27 | 28 | - [raft protocol](http://raft.github.io/) for leader election and log replication 29 | - Log compaction - it use fork for copy-on-write while serializing data on disk 30 | - Dynamic membership changes - you can do it with [syncobj_admin](https://github.com/bakwc/PySyncObj/wiki/syncobj_admin) utility or [directly from your code](https://github.com/bakwc/PySyncObj/wiki/Dynamic-membership-change) 31 | - [Zero downtime deploy](https://github.com/bakwc/PySyncObj/wiki/Zero-downtime-deploy) - no need to stop cluster to update nodes 32 | - In-memory and on-disk serialization - you can use in-memory mode for small data and on-disk for big one 33 | - Encryption - you can set password and use it in external network 34 | - Python2 and Python3 on linux, macos and windows - no dependencies required (only optional one, eg. cryptography) 35 | - Configurable event loop - it can works in separate thread with it's own event loop - or you can call onTick function inside your own one 36 | - Convenient interface - you can easily transform arbitrary class into a replicated one (see example below). 37 | 38 | ## Content 39 | * [Install](#install) 40 | * [Basic Usage](#usage) 41 | * ["Batteries"](#batteries) 42 | * [API Documentation](http://pysyncobj.readthedocs.io) 43 | * [Performance](#performance) 44 | * [Publications](#publications) 45 | 46 | ## Install 47 | PySyncObj itself: 48 | ```bash 49 | pip install pysyncobj 50 | ``` 51 | Cryptography for encryption (optional): 52 | ```bash 53 | pip install cryptography 54 | ``` 55 | 56 | ## Usage 57 | Consider you have a class that implements counter: 58 | ```python 59 | class MyCounter(object): 60 | def __init__(self): 61 | self.__counter = 0 62 | 63 | def incCounter(self): 64 | self.__counter += 1 65 | 66 | def getCounter(self): 67 | return self.__counter 68 | ``` 69 | So, to transform your class into a replicated one: 70 | - Inherit it from SyncObj 71 | - Initialize SyncObj with a self address and a list of partner addresses. Eg. if you have `serverA`, `serverB` and `serverC` and want to use 4321 port, you should use self address `serverA:4321` with partners `[serverB:4321, serverC:4321]` for your application, running at `serverA`; self address `serverB:4321` with partners `[serverA:4321, serverC:4321]` for your application at `serverB`; self address `serverC:4321` with partners `[serverA:4321, serverB:4321]` for app at `serverC`. 72 | - Mark all your methods that modifies your class fields with `@replicated` decorator. 73 | So your final class will looks like: 74 | ```python 75 | class MyCounter(SyncObj): 76 | def __init__(self): 77 | super(MyCounter, self).__init__('serverA:4321', ['serverB:4321', 'serverC:4321']) 78 | self.__counter = 0 79 | 80 | @replicated 81 | def incCounter(self): 82 | self.__counter += 1 83 | 84 | def getCounter(self): 85 | return self.__counter 86 | ``` 87 | And thats all! Now you can call `incCounter` on `serverA`, and check counter value on `serverB` - they will be synchronized. 88 | 89 | ## Batteries 90 | If you just need some distributed data structures - try built-in "batteries". Few examples: 91 | ### Counter & Dict 92 | ```python 93 | from pysyncobj import SyncObj 94 | from pysyncobj.batteries import ReplCounter, ReplDict 95 | 96 | counter1 = ReplCounter() 97 | counter2 = ReplCounter() 98 | dict1 = ReplDict() 99 | syncObj = SyncObj('serverA:4321', ['serverB:4321', 'serverC:4321'], consumers=[counter1, counter2, dict1]) 100 | 101 | counter1.set(42, sync=True) # set initial value to 42, 'sync' means that operation is blocking 102 | counter1.add(10, sync=True) # add 10 to counter value 103 | counter2.inc(sync=True) # increment counter value by one 104 | dict1.set('testKey1', 'testValue1', sync=True) 105 | dict1['testKey2'] = 'testValue2' # this is basically the same as previous, but asynchronous (non-blocking) 106 | print(counter1, counter2, dict1['testKey1'], dict1.get('testKey2')) 107 | ``` 108 | ### Lock 109 | ```python 110 | from pysyncobj import SyncObj 111 | from pysyncobj.batteries import ReplLockManager 112 | 113 | lockManager = ReplLockManager(autoUnlockTime=75) # Lock will be released if connection dropped for more than 75 seconds 114 | syncObj = SyncObj('serverA:4321', ['serverB:4321', 'serverC:4321'], consumers=[lockManager]) 115 | if lockManager.tryAcquire('testLockName', sync=True): 116 | # do some actions 117 | lockManager.release('testLockName') 118 | ``` 119 | You can look at [batteries implementation](https://github.com/bakwc/PySyncObj/blob/master/pysyncobj/batteries.py), [examples](https://github.com/bakwc/PySyncObj/tree/master/examples) and [unit-tests](https://github.com/bakwc/PySyncObj/blob/master/test_syncobj.py) for more use-cases. Also there is an [API documentation](http://pysyncobj.readthedocs.io). Feel free to create proposals and/or pull requests with new batteries, features, etc. Join our [gitter chat](https://gitter.im/bakwc/PySyncObj) if you have any questions. 120 | 121 | 122 | ## Performance 123 | ![15K rps on 3 nodes; 14K rps on 7 nodes;](http://pastexen.com/i/Ge3lnrM1OY.png "RPS vs Cluster Size") 124 | ![22K rps on 10 byte requests; 5K rps on 20Kb requests;](http://pastexen.com/i/0RIsrKxJsV.png "RPS vs Request Size") 125 | 126 | ## Publications 127 | - [Adventures in fault tolerant alerting with Python](https://blog.hostedgraphite.com/2017/05/05/adventures-in-fault-tolerant-alerting-with-python/) 128 | - [Строим распределенную систему c PySyncObj](https://habrahabr.ru/company/wargaming/blog/301398/) 129 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | environment: 2 | 3 | matrix: 4 | 5 | - PYTHON: "C:\\Python27" 6 | # - PYTHON: "C:\\Python34" 7 | - PYTHON: "C:\\Python35" 8 | - PYTHON: "C:\\Python38" 9 | 10 | install: 11 | - "%PYTHON%\\python.exe -m pip install --upgrade pip" 12 | - "%PYTHON%\\python.exe -m pip install pytest" 13 | - "%PYTHON%\\python.exe -m pip install cryptography" 14 | 15 | build: off 16 | 17 | test_script: 18 | - "%PYTHON%\\python.exe -m pytest -v -l test_syncobj.py" 19 | -------------------------------------------------------------------------------- /benchmarks/benchmarks.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import sys 3 | import pickle 4 | from functools import wraps 5 | from subprocess import Popen, PIPE 6 | import os 7 | DEVNULL = open(os.devnull, 'wb') 8 | 9 | START_PORT = 4321 10 | MIN_RPS = 10 11 | MAX_RPS = 40000 12 | 13 | def memoize(fileName): 14 | def doMemoize(func): 15 | if os.path.exists(fileName): 16 | with open(fileName) as f: 17 | cache = pickle.load(f) 18 | else: 19 | cache = {} 20 | @wraps(func) 21 | def wrap(*args): 22 | if args not in cache: 23 | cache[args] = func(*args) 24 | with open(fileName, 'wb') as f: 25 | pickle.dump(cache, f) 26 | return cache[args] 27 | return wrap 28 | return doMemoize 29 | 30 | def singleBenchmark(requestsPerSecond, requestSize, numNodes, numNodesReadonly = 0, delay = False): 31 | rpsPerNode = requestsPerSecond / (numNodes + numNodesReadonly) 32 | cmd = [sys.executable, 'testobj_delay.py' if delay else 'testobj.py', str(rpsPerNode), str(requestSize)] 33 | #cmd = 'python2.7 -m cProfile -s time testobj.py %d %d' % (rpsPerNode, requestSize) 34 | processes = [] 35 | allAddrs = [] 36 | for i in range(numNodes): 37 | allAddrs.append('localhost:%d' % (START_PORT + i)) 38 | for i in range(numNodes): 39 | addrs = list(allAddrs) 40 | selfAddr = addrs.pop(i) 41 | p = Popen(cmd + [selfAddr] + addrs, stdin=PIPE) 42 | processes.append(p) 43 | for i in range(numNodesReadonly): 44 | p = Popen(cmd + ['readonly'] + allAddrs, stdin=PIPE) 45 | processes.append(p) 46 | errRates = [] 47 | for p in processes: 48 | p.communicate() 49 | errRates.append(float(p.returncode) / 100.0) 50 | avgRate = sum(errRates) / len(errRates) 51 | print('average success rate:', avgRate) 52 | if delay: 53 | return avgRate 54 | return avgRate >= 0.9 55 | 56 | def doDetectMaxRps(requestSize, numNodes): 57 | a = MIN_RPS 58 | b = MAX_RPS 59 | numIt = 0 60 | while b - a > MIN_RPS: 61 | c = a + (b - a) / 2 62 | res = singleBenchmark(c, requestSize, numNodes) 63 | if res: 64 | a = c 65 | else: 66 | b = c 67 | print('subiteration %d, current max %d' % (numIt, a)) 68 | numIt += 1 69 | return a 70 | 71 | @memoize('maxRpsCache.bin') 72 | def detectMaxRps(requestSize, numNodes): 73 | results = [] 74 | for i in range(0, 5): 75 | res = doDetectMaxRps(requestSize, numNodes) 76 | print('iteration %d, current max %d' % (i, res)) 77 | results.append(res) 78 | return sorted(results)[len(results) / 2] 79 | 80 | def printUsage(): 81 | print('Usage: %s mode(delay/rps/custom)' % sys.argv[0]) 82 | sys.exit(-1) 83 | 84 | if __name__ == '__main__': 85 | 86 | if len(sys.argv) != 2: 87 | printUsage() 88 | 89 | mode = sys.argv[1] 90 | if mode == 'delay': 91 | print('Average delay:', singleBenchmark(50, 10, 5, delay=True)) 92 | elif mode == 'rps': 93 | for i in range(10, 2100, 500): 94 | res = detectMaxRps(i, 3) 95 | print('request size: %d, rps: %d' % (i, int(res))) 96 | 97 | for i in range(3, 8): 98 | res = detectMaxRps(200, i) 99 | print('nodes number: %d, rps: %d' % (i, int(res))) 100 | elif mode == 'custom': 101 | singleBenchmark(25000, 10, 3) 102 | else: 103 | printUsage() 104 | -------------------------------------------------------------------------------- /benchmarks/testobj.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import sys 3 | import time 4 | import random 5 | from collections import defaultdict 6 | sys.path.append("../") 7 | from pysyncobj import SyncObj, replicated, SyncObjConf, FAIL_REASON 8 | 9 | class TestObj(SyncObj): 10 | 11 | def __init__(self, selfNodeAddr, otherNodeAddrs): 12 | super(TestObj, self).__init__(selfNodeAddr, otherNodeAddrs) 13 | self.__appliedCommands = 0 14 | 15 | @replicated 16 | def testMethod(self, value): 17 | self.__appliedCommands += 1 18 | 19 | def getNumCommandsApplied(self): 20 | return self.__appliedCommands 21 | 22 | _g_sent = 0 23 | _g_success = 0 24 | _g_error = 0 25 | _g_errors = defaultdict(int) 26 | 27 | def clbck(res, err): 28 | global _g_error, _g_success 29 | if err == FAIL_REASON.SUCCESS: 30 | _g_success += 1 31 | else: 32 | _g_error += 1 33 | _g_errors[err] += 1 34 | 35 | def getRandStr(l): 36 | f = '%0' + str(l) + 'x' 37 | return f % random.randrange(16 ** l) 38 | 39 | if __name__ == '__main__': 40 | if len(sys.argv) < 5: 41 | print('Usage: %s RPS requestSize selfHost:port partner1Host:port partner2Host:port ...' % sys.argv[0]) 42 | sys.exit(-1) 43 | 44 | numCommands = int(sys.argv[1]) 45 | cmdSize = int(sys.argv[2]) 46 | 47 | selfAddr = sys.argv[3] 48 | if selfAddr == 'readonly': 49 | selfAddr = None 50 | partners = sys.argv[4:] 51 | 52 | maxCommandsQueueSize = int(0.9 * SyncObjConf().commandsQueueSize / len(partners)) 53 | 54 | obj = TestObj(selfAddr, partners) 55 | 56 | while obj._getLeader() is None: 57 | time.sleep(0.5) 58 | 59 | time.sleep(4.0) 60 | 61 | startTime = time.time() 62 | 63 | while time.time() - startTime < 25.0: 64 | st = time.time() 65 | for i in xrange(0, numCommands): 66 | obj.testMethod(getRandStr(cmdSize), callback=clbck) 67 | _g_sent += 1 68 | delta = time.time() - st 69 | assert delta <= 1.0 70 | time.sleep(1.0 - delta) 71 | 72 | time.sleep(4.0) 73 | 74 | successRate = float(_g_success) / float(_g_sent) 75 | print('SUCCESS RATE:', successRate) 76 | 77 | if successRate < 0.9: 78 | print('LOST RATE:', 1.0 - float(_g_success + _g_error) / float(_g_sent)) 79 | print('ERRORS STATS: %d' % len(_g_errors)) 80 | for err in _g_errors: 81 | print(err, float(_g_errors[err]) / float(_g_error)) 82 | 83 | sys.exit(int(successRate * 100)) 84 | -------------------------------------------------------------------------------- /benchmarks/testobj_delay.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import sys 3 | import time 4 | import random 5 | from collections import defaultdict 6 | sys.path.append("../") 7 | from pysyncobj import SyncObj, replicated, SyncObjConf, FAIL_REASON 8 | 9 | class TestObj(SyncObj): 10 | 11 | def __init__(self, selfNodeAddr, otherNodeAddrs): 12 | cfg = SyncObjConf( 13 | appendEntriesUseBatch=False, 14 | ) 15 | super(TestObj, self).__init__(selfNodeAddr, otherNodeAddrs, cfg) 16 | self.__appliedCommands = 0 17 | 18 | @replicated 19 | def testMethod(self, val, callTime): 20 | self.__appliedCommands += 1 21 | return (callTime, time.time()) 22 | 23 | def getNumCommandsApplied(self): 24 | return self.__appliedCommands 25 | 26 | _g_sent = 0 27 | _g_success = 0 28 | _g_error = 0 29 | _g_errors = defaultdict(int) 30 | _g_delays = [] 31 | 32 | def clbck(res, err): 33 | global _g_error, _g_success, _g_delays 34 | if err == FAIL_REASON.SUCCESS: 35 | _g_success += 1 36 | callTime, recvTime = res 37 | delay = time.time() - callTime 38 | _g_delays.append(delay) 39 | else: 40 | _g_error += 1 41 | _g_errors[err] += 1 42 | 43 | def getRandStr(l): 44 | f = '%0' + str(l) + 'x' 45 | return f % random.randrange(16 ** l) 46 | 47 | if __name__ == '__main__': 48 | if len(sys.argv) < 5: 49 | print('Usage: %s RPS requestSize selfHost:port partner1Host:port partner2Host:port ...' % sys.argv[0]) 50 | sys.exit(-1) 51 | 52 | numCommands = int(sys.argv[1]) 53 | cmdSize = int(sys.argv[2]) 54 | 55 | selfAddr = sys.argv[3] 56 | if selfAddr == 'readonly': 57 | selfAddr = None 58 | partners = sys.argv[4:] 59 | 60 | maxCommandsQueueSize = int(0.9 * SyncObjConf().commandsQueueSize / len(partners)) 61 | 62 | obj = TestObj(selfAddr, partners) 63 | 64 | while obj._getLeader() is None: 65 | time.sleep(0.5) 66 | 67 | time.sleep(4.0) 68 | 69 | startTime = time.time() 70 | 71 | while time.time() - startTime < 25.0: 72 | st = time.time() 73 | for i in xrange(0, numCommands): 74 | obj.testMethod(getRandStr(cmdSize), time.time(), callback=clbck) 75 | _g_sent += 1 76 | delta = time.time() - st 77 | assert delta <= 1.0 78 | time.sleep(1.0 - delta) 79 | 80 | time.sleep(4.0) 81 | 82 | successRate = float(_g_success) / float(_g_sent) 83 | print('SUCCESS RATE:', successRate) 84 | 85 | delays = sorted(_g_delays) 86 | avgDelay = _g_delays[len(_g_delays) / 2] 87 | print('AVG DELAY:', avgDelay) 88 | 89 | if successRate < 0.9: 90 | print('LOST RATE:', 1.0 - float(_g_success + _g_error) / float(_g_sent)) 91 | print('ERRORS STATS: %d' % len(_g_errors)) 92 | for err in _g_errors: 93 | print(err, float(_g_errors[err]) / float(_g_error)) 94 | 95 | sys.exit(int(avgDelay * 100)) 96 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 14 | # the i18n builder cannot share the environment and doctrees with the others 15 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source 16 | 17 | .PHONY: help 18 | help: 19 | @echo "Please use \`make ' where is one of" 20 | @echo " html to make standalone HTML files" 21 | @echo " dirhtml to make HTML files named index.html in directories" 22 | @echo " singlehtml to make a single large HTML file" 23 | @echo " pickle to make pickle files" 24 | @echo " json to make JSON files" 25 | @echo " htmlhelp to make HTML files and a HTML help project" 26 | @echo " qthelp to make HTML files and a qthelp project" 27 | @echo " applehelp to make an Apple Help Book" 28 | @echo " devhelp to make HTML files and a Devhelp project" 29 | @echo " epub to make an epub" 30 | @echo " epub3 to make an epub3" 31 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 32 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 33 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 34 | @echo " text to make text files" 35 | @echo " man to make manual pages" 36 | @echo " texinfo to make Texinfo files" 37 | @echo " info to make Texinfo files and run them through makeinfo" 38 | @echo " gettext to make PO message catalogs" 39 | @echo " changes to make an overview of all changed/added/deprecated items" 40 | @echo " xml to make Docutils-native XML files" 41 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 42 | @echo " linkcheck to check all external links for integrity" 43 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 44 | @echo " coverage to run coverage check of the documentation (if enabled)" 45 | @echo " dummy to check syntax errors of document sources" 46 | 47 | .PHONY: clean 48 | clean: 49 | rm -rf $(BUILDDIR)/* 50 | 51 | .PHONY: html 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | .PHONY: dirhtml 58 | dirhtml: 59 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 60 | @echo 61 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 62 | 63 | .PHONY: singlehtml 64 | singlehtml: 65 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 66 | @echo 67 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 68 | 69 | .PHONY: pickle 70 | pickle: 71 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 72 | @echo 73 | @echo "Build finished; now you can process the pickle files." 74 | 75 | .PHONY: json 76 | json: 77 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 78 | @echo 79 | @echo "Build finished; now you can process the JSON files." 80 | 81 | .PHONY: htmlhelp 82 | htmlhelp: 83 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 84 | @echo 85 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 86 | ".hhp project file in $(BUILDDIR)/htmlhelp." 87 | 88 | .PHONY: qthelp 89 | qthelp: 90 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 91 | @echo 92 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 93 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 94 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PySyncObj.qhcp" 95 | @echo "To view the help file:" 96 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PySyncObj.qhc" 97 | 98 | .PHONY: applehelp 99 | applehelp: 100 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 101 | @echo 102 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 103 | @echo "N.B. You won't be able to view it unless you put it in" \ 104 | "~/Library/Documentation/Help or install it in your application" \ 105 | "bundle." 106 | 107 | .PHONY: devhelp 108 | devhelp: 109 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 110 | @echo 111 | @echo "Build finished." 112 | @echo "To view the help file:" 113 | @echo "# mkdir -p $$HOME/.local/share/devhelp/PySyncObj" 114 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PySyncObj" 115 | @echo "# devhelp" 116 | 117 | .PHONY: epub 118 | epub: 119 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 120 | @echo 121 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 122 | 123 | .PHONY: epub3 124 | epub3: 125 | $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 126 | @echo 127 | @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." 128 | 129 | .PHONY: latex 130 | latex: 131 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 132 | @echo 133 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 134 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 135 | "(use \`make latexpdf' here to do that automatically)." 136 | 137 | .PHONY: latexpdf 138 | latexpdf: 139 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 140 | @echo "Running LaTeX files through pdflatex..." 141 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 142 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 143 | 144 | .PHONY: latexpdfja 145 | latexpdfja: 146 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 147 | @echo "Running LaTeX files through platex and dvipdfmx..." 148 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 149 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 150 | 151 | .PHONY: text 152 | text: 153 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 154 | @echo 155 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 156 | 157 | .PHONY: man 158 | man: 159 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 160 | @echo 161 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 162 | 163 | .PHONY: texinfo 164 | texinfo: 165 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 166 | @echo 167 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 168 | @echo "Run \`make' in that directory to run these through makeinfo" \ 169 | "(use \`make info' here to do that automatically)." 170 | 171 | .PHONY: info 172 | info: 173 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 174 | @echo "Running Texinfo files through makeinfo..." 175 | make -C $(BUILDDIR)/texinfo info 176 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 177 | 178 | .PHONY: gettext 179 | gettext: 180 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 181 | @echo 182 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 183 | 184 | .PHONY: changes 185 | changes: 186 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 187 | @echo 188 | @echo "The overview file is in $(BUILDDIR)/changes." 189 | 190 | .PHONY: linkcheck 191 | linkcheck: 192 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 193 | @echo 194 | @echo "Link check complete; look for any errors in the above output " \ 195 | "or in $(BUILDDIR)/linkcheck/output.txt." 196 | 197 | .PHONY: doctest 198 | doctest: 199 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 200 | @echo "Testing of doctests in the sources finished, look at the " \ 201 | "results in $(BUILDDIR)/doctest/output.txt." 202 | 203 | .PHONY: coverage 204 | coverage: 205 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage 206 | @echo "Testing of coverage in the sources finished, look at the " \ 207 | "results in $(BUILDDIR)/coverage/python.txt." 208 | 209 | .PHONY: xml 210 | xml: 211 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 212 | @echo 213 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 214 | 215 | .PHONY: pseudoxml 216 | pseudoxml: 217 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 218 | @echo 219 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 220 | 221 | .PHONY: dummy 222 | dummy: 223 | $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy 224 | @echo 225 | @echo "Build finished. Dummy builder generates no files." 226 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% source 10 | set I18NSPHINXOPTS=%SPHINXOPTS% source 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. epub3 to make an epub3 31 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 32 | echo. text to make text files 33 | echo. man to make manual pages 34 | echo. texinfo to make Texinfo files 35 | echo. gettext to make PO message catalogs 36 | echo. changes to make an overview over all changed/added/deprecated items 37 | echo. xml to make Docutils-native XML files 38 | echo. pseudoxml to make pseudoxml-XML files for display purposes 39 | echo. linkcheck to check all external links for integrity 40 | echo. doctest to run all doctests embedded in the documentation if enabled 41 | echo. coverage to run coverage check of the documentation if enabled 42 | echo. dummy to check syntax errors of document sources 43 | goto end 44 | ) 45 | 46 | if "%1" == "clean" ( 47 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 48 | del /q /s %BUILDDIR%\* 49 | goto end 50 | ) 51 | 52 | 53 | REM Check if sphinx-build is available and fallback to Python version if any 54 | %SPHINXBUILD% 1>NUL 2>NUL 55 | if errorlevel 9009 goto sphinx_python 56 | goto sphinx_ok 57 | 58 | :sphinx_python 59 | 60 | set SPHINXBUILD=python -m sphinx.__init__ 61 | %SPHINXBUILD% 2> nul 62 | if errorlevel 9009 ( 63 | echo. 64 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 65 | echo.installed, then set the SPHINXBUILD environment variable to point 66 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 67 | echo.may add the Sphinx directory to PATH. 68 | echo. 69 | echo.If you don't have Sphinx installed, grab it from 70 | echo.http://sphinx-doc.org/ 71 | exit /b 1 72 | ) 73 | 74 | :sphinx_ok 75 | 76 | 77 | if "%1" == "html" ( 78 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 79 | if errorlevel 1 exit /b 1 80 | echo. 81 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 82 | goto end 83 | ) 84 | 85 | if "%1" == "dirhtml" ( 86 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 87 | if errorlevel 1 exit /b 1 88 | echo. 89 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 90 | goto end 91 | ) 92 | 93 | if "%1" == "singlehtml" ( 94 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 95 | if errorlevel 1 exit /b 1 96 | echo. 97 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 98 | goto end 99 | ) 100 | 101 | if "%1" == "pickle" ( 102 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 103 | if errorlevel 1 exit /b 1 104 | echo. 105 | echo.Build finished; now you can process the pickle files. 106 | goto end 107 | ) 108 | 109 | if "%1" == "json" ( 110 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 111 | if errorlevel 1 exit /b 1 112 | echo. 113 | echo.Build finished; now you can process the JSON files. 114 | goto end 115 | ) 116 | 117 | if "%1" == "htmlhelp" ( 118 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 119 | if errorlevel 1 exit /b 1 120 | echo. 121 | echo.Build finished; now you can run HTML Help Workshop with the ^ 122 | .hhp project file in %BUILDDIR%/htmlhelp. 123 | goto end 124 | ) 125 | 126 | if "%1" == "qthelp" ( 127 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 128 | if errorlevel 1 exit /b 1 129 | echo. 130 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 131 | .qhcp project file in %BUILDDIR%/qthelp, like this: 132 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\PySyncObj.qhcp 133 | echo.To view the help file: 134 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\PySyncObj.ghc 135 | goto end 136 | ) 137 | 138 | if "%1" == "devhelp" ( 139 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 140 | if errorlevel 1 exit /b 1 141 | echo. 142 | echo.Build finished. 143 | goto end 144 | ) 145 | 146 | if "%1" == "epub" ( 147 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 148 | if errorlevel 1 exit /b 1 149 | echo. 150 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 151 | goto end 152 | ) 153 | 154 | if "%1" == "epub3" ( 155 | %SPHINXBUILD% -b epub3 %ALLSPHINXOPTS% %BUILDDIR%/epub3 156 | if errorlevel 1 exit /b 1 157 | echo. 158 | echo.Build finished. The epub3 file is in %BUILDDIR%/epub3. 159 | goto end 160 | ) 161 | 162 | if "%1" == "latex" ( 163 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 164 | if errorlevel 1 exit /b 1 165 | echo. 166 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 167 | goto end 168 | ) 169 | 170 | if "%1" == "latexpdf" ( 171 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 172 | cd %BUILDDIR%/latex 173 | make all-pdf 174 | cd %~dp0 175 | echo. 176 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 177 | goto end 178 | ) 179 | 180 | if "%1" == "latexpdfja" ( 181 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 182 | cd %BUILDDIR%/latex 183 | make all-pdf-ja 184 | cd %~dp0 185 | echo. 186 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 187 | goto end 188 | ) 189 | 190 | if "%1" == "text" ( 191 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 192 | if errorlevel 1 exit /b 1 193 | echo. 194 | echo.Build finished. The text files are in %BUILDDIR%/text. 195 | goto end 196 | ) 197 | 198 | if "%1" == "man" ( 199 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 200 | if errorlevel 1 exit /b 1 201 | echo. 202 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 203 | goto end 204 | ) 205 | 206 | if "%1" == "texinfo" ( 207 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 208 | if errorlevel 1 exit /b 1 209 | echo. 210 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 211 | goto end 212 | ) 213 | 214 | if "%1" == "gettext" ( 215 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 216 | if errorlevel 1 exit /b 1 217 | echo. 218 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 219 | goto end 220 | ) 221 | 222 | if "%1" == "changes" ( 223 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 224 | if errorlevel 1 exit /b 1 225 | echo. 226 | echo.The overview file is in %BUILDDIR%/changes. 227 | goto end 228 | ) 229 | 230 | if "%1" == "linkcheck" ( 231 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 232 | if errorlevel 1 exit /b 1 233 | echo. 234 | echo.Link check complete; look for any errors in the above output ^ 235 | or in %BUILDDIR%/linkcheck/output.txt. 236 | goto end 237 | ) 238 | 239 | if "%1" == "doctest" ( 240 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 241 | if errorlevel 1 exit /b 1 242 | echo. 243 | echo.Testing of doctests in the sources finished, look at the ^ 244 | results in %BUILDDIR%/doctest/output.txt. 245 | goto end 246 | ) 247 | 248 | if "%1" == "coverage" ( 249 | %SPHINXBUILD% -b coverage %ALLSPHINXOPTS% %BUILDDIR%/coverage 250 | if errorlevel 1 exit /b 1 251 | echo. 252 | echo.Testing of coverage in the sources finished, look at the ^ 253 | results in %BUILDDIR%/coverage/python.txt. 254 | goto end 255 | ) 256 | 257 | if "%1" == "xml" ( 258 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 259 | if errorlevel 1 exit /b 1 260 | echo. 261 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 262 | goto end 263 | ) 264 | 265 | if "%1" == "pseudoxml" ( 266 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 267 | if errorlevel 1 exit /b 1 268 | echo. 269 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 270 | goto end 271 | ) 272 | 273 | if "%1" == "dummy" ( 274 | %SPHINXBUILD% -b dummy %ALLSPHINXOPTS% %BUILDDIR%/dummy 275 | if errorlevel 1 exit /b 1 276 | echo. 277 | echo.Build finished. Dummy builder generates no files. 278 | goto end 279 | ) 280 | 281 | :end 282 | -------------------------------------------------------------------------------- /docs/source/batteries.rst: -------------------------------------------------------------------------------- 1 | pysyncobj.batteries package 2 | =========================== 3 | 4 | ReplCounter 5 | ----------- 6 | 7 | .. autoclass:: pysyncobj.batteries.ReplCounter 8 | :members: 9 | 10 | ReplList 11 | -------- 12 | 13 | .. autoclass:: pysyncobj.batteries.ReplList 14 | :members: 15 | 16 | ReplDict 17 | -------- 18 | 19 | .. autoclass:: pysyncobj.batteries.ReplDict 20 | :members: 21 | 22 | ReplSet 23 | ------- 24 | 25 | .. autoclass:: pysyncobj.batteries.ReplSet 26 | :members: 27 | 28 | ReplQueue 29 | --------- 30 | 31 | .. autoclass:: pysyncobj.batteries.ReplQueue 32 | :members: 33 | 34 | ReplPriorityQueue 35 | ----------------- 36 | 37 | .. autoclass:: pysyncobj.batteries.ReplPriorityQueue 38 | :members: 39 | 40 | ReplLockManager 41 | --------------- 42 | 43 | .. autoclass:: pysyncobj.batteries.ReplLockManager 44 | :members: 45 | 46 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # PySyncObj documentation build configuration file, created by 4 | # sphinx-quickstart on Sat Sep 17 17:25:17 2016. 5 | # 6 | # This file is execfile()d with the current directory set to its 7 | # containing dir. 8 | # 9 | # Note that not all possible configuration values are present in this 10 | # autogenerated file. 11 | # 12 | # All configuration values have a default; values that are commented out 13 | # serve to show the default. 14 | 15 | # If extensions (or modules to document with autodoc) are in another directory, 16 | # add these directories to sys.path here. If the directory is relative to the 17 | # documentation root, use os.path.abspath to make it absolute, like shown here. 18 | # 19 | import os 20 | import sys 21 | import sphinx_rtd_theme 22 | 23 | sys.path.insert(0, os.path.abspath('../..')) 24 | 25 | # -- General configuration ------------------------------------------------ 26 | 27 | # If your documentation needs a minimal Sphinx version, state it here. 28 | # 29 | # needs_sphinx = '1.0' 30 | 31 | # Add any Sphinx extension module names here, as strings. They can be 32 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 33 | # ones. 34 | extensions = [ 35 | 'sphinx.ext.autodoc', 36 | ] 37 | 38 | html_theme = "sphinx_rtd_theme" 39 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 40 | 41 | # Add any paths that contain templates here, relative to this directory. 42 | templates_path = ['_templates'] 43 | 44 | # The suffix(es) of source filenames. 45 | # You can specify multiple suffix as a list of string: 46 | # 47 | # source_suffix = ['.rst', '.md'] 48 | source_suffix = '.rst' 49 | 50 | # The encoding of source files. 51 | # 52 | # source_encoding = 'utf-8-sig' 53 | 54 | # The master toctree document. 55 | master_doc = 'index' 56 | 57 | # General information about the project. 58 | project = u'PySyncObj' 59 | copyright = u'2021, Filipp Ozinov' 60 | author = u'Filipp Ozinov' 61 | 62 | # The version info for the project you're documenting, acts as replacement for 63 | # |version| and |release|, also used in various other places throughout the 64 | # built documents. 65 | # 66 | # The short X.Y version. 67 | version = u'0.3.14' 68 | # The full version, including alpha/beta/rc tags. 69 | release = u'0.3.14' 70 | 71 | # The language for content autogenerated by Sphinx. Refer to documentation 72 | # for a list of supported languages. 73 | # 74 | # This is also used if you do content translation via gettext catalogs. 75 | # Usually you set "language" from the command line for these cases. 76 | language = None 77 | 78 | # There are two options for replacing |today|: either, you set today to some 79 | # non-false value, then it is used: 80 | # 81 | # today = '' 82 | # 83 | # Else, today_fmt is used as the format for a strftime call. 84 | # 85 | # today_fmt = '%B %d, %Y' 86 | 87 | # List of patterns, relative to source directory, that match files and 88 | # directories to ignore when looking for source files. 89 | # This patterns also effect to html_static_path and html_extra_path 90 | exclude_patterns = [] 91 | 92 | # The reST default role (used for this markup: `text`) to use for all 93 | # documents. 94 | # 95 | # default_role = None 96 | 97 | # If true, '()' will be appended to :func: etc. cross-reference text. 98 | # 99 | # add_function_parentheses = True 100 | 101 | # If true, the current module name will be prepended to all description 102 | # unit titles (such as .. function::). 103 | # 104 | # add_module_names = True 105 | 106 | # If true, sectionauthor and moduleauthor directives will be shown in the 107 | # output. They are ignored by default. 108 | # 109 | # show_authors = False 110 | 111 | # The name of the Pygments (syntax highlighting) style to use. 112 | pygments_style = 'sphinx' 113 | 114 | # A list of ignored prefixes for module index sorting. 115 | # modindex_common_prefix = [] 116 | 117 | # If true, keep warnings as "system message" paragraphs in the built documents. 118 | # keep_warnings = False 119 | 120 | # If true, `todo` and `todoList` produce output, else they produce nothing. 121 | todo_include_todos = False 122 | 123 | 124 | # -- Options for HTML output ---------------------------------------------- 125 | 126 | # The theme to use for HTML and HTML Help pages. See the documentation for 127 | # a list of builtin themes. 128 | # 129 | #html_theme = 'alabaster' 130 | 131 | # Theme options are theme-specific and customize the look and feel of a theme 132 | # further. For a list of options available for each theme, see the 133 | # documentation. 134 | # 135 | # html_theme_options = {} 136 | 137 | # Add any paths that contain custom themes here, relative to this directory. 138 | # html_theme_path = [] 139 | 140 | # The name for this set of Sphinx documents. 141 | # " v documentation" by default. 142 | # 143 | # html_title = u'PySyncObj v0.2.3' 144 | 145 | # A shorter title for the navigation bar. Default is the same as html_title. 146 | # 147 | # html_short_title = None 148 | 149 | # The name of an image file (relative to this directory) to place at the top 150 | # of the sidebar. 151 | # 152 | # html_logo = None 153 | 154 | # The name of an image file (relative to this directory) to use as a favicon of 155 | # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 156 | # pixels large. 157 | # 158 | # html_favicon = None 159 | 160 | # Add any paths that contain custom static files (such as style sheets) here, 161 | # relative to this directory. They are copied after the builtin static files, 162 | # so a file named "default.css" will overwrite the builtin "default.css". 163 | html_static_path = ['_static'] 164 | 165 | # Add any extra paths that contain custom files (such as robots.txt or 166 | # .htaccess) here, relative to this directory. These files are copied 167 | # directly to the root of the documentation. 168 | # 169 | # html_extra_path = [] 170 | 171 | # If not None, a 'Last updated on:' timestamp is inserted at every page 172 | # bottom, using the given strftime format. 173 | # The empty string is equivalent to '%b %d, %Y'. 174 | # 175 | # html_last_updated_fmt = None 176 | 177 | # If true, SmartyPants will be used to convert quotes and dashes to 178 | # typographically correct entities. 179 | # 180 | # html_use_smartypants = True 181 | 182 | # Custom sidebar templates, maps document names to template names. 183 | # 184 | # html_sidebars = {} 185 | 186 | # Additional templates that should be rendered to pages, maps page names to 187 | # template names. 188 | # 189 | # html_additional_pages = {} 190 | 191 | # If false, no module index is generated. 192 | # 193 | # html_domain_indices = True 194 | 195 | # If false, no index is generated. 196 | # 197 | # html_use_index = True 198 | 199 | # If true, the index is split into individual pages for each letter. 200 | # 201 | # html_split_index = False 202 | 203 | # If true, links to the reST sources are added to the pages. 204 | # 205 | # html_show_sourcelink = True 206 | 207 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 208 | # 209 | # html_show_sphinx = True 210 | 211 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 212 | # 213 | # html_show_copyright = True 214 | 215 | # If true, an OpenSearch description file will be output, and all pages will 216 | # contain a tag referring to it. The value of this option must be the 217 | # base URL from which the finished HTML is served. 218 | # 219 | # html_use_opensearch = '' 220 | 221 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 222 | # html_file_suffix = None 223 | 224 | # Language to be used for generating the HTML full-text search index. 225 | # Sphinx supports the following languages: 226 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 227 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' 228 | # 229 | # html_search_language = 'en' 230 | 231 | # A dictionary with options for the search language support, empty by default. 232 | # 'ja' uses this config value. 233 | # 'zh' user can custom change `jieba` dictionary path. 234 | # 235 | # html_search_options = {'type': 'default'} 236 | 237 | # The name of a javascript file (relative to the configuration directory) that 238 | # implements a search results scorer. If empty, the default will be used. 239 | # 240 | # html_search_scorer = 'scorer.js' 241 | 242 | # Output file base name for HTML help builder. 243 | htmlhelp_basename = 'PySyncObjdoc' 244 | 245 | # -- Options for LaTeX output --------------------------------------------- 246 | 247 | latex_elements = { 248 | # The paper size ('letterpaper' or 'a4paper'). 249 | # 250 | # 'papersize': 'letterpaper', 251 | 252 | # The font size ('10pt', '11pt' or '12pt'). 253 | # 254 | # 'pointsize': '10pt', 255 | 256 | # Additional stuff for the LaTeX preamble. 257 | # 258 | # 'preamble': '', 259 | 260 | # Latex figure (float) alignment 261 | # 262 | # 'figure_align': 'htbp', 263 | } 264 | 265 | # Grouping the document tree into LaTeX files. List of tuples 266 | # (source start file, target name, title, 267 | # author, documentclass [howto, manual, or own class]). 268 | latex_documents = [ 269 | (master_doc, 'PySyncObj.tex', u'PySyncObj Documentation', 270 | u'Filipp Ozinov', 'manual'), 271 | ] 272 | 273 | # The name of an image file (relative to this directory) to place at the top of 274 | # the title page. 275 | # 276 | # latex_logo = None 277 | 278 | # For "manual" documents, if this is true, then toplevel headings are parts, 279 | # not chapters. 280 | # 281 | # latex_use_parts = False 282 | 283 | # If true, show page references after internal links. 284 | # 285 | # latex_show_pagerefs = False 286 | 287 | # If true, show URL addresses after external links. 288 | # 289 | # latex_show_urls = False 290 | 291 | # Documents to append as an appendix to all manuals. 292 | # 293 | # latex_appendices = [] 294 | 295 | # It false, will not define \strong, \code, itleref, \crossref ... but only 296 | # \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added 297 | # packages. 298 | # 299 | # latex_keep_old_macro_names = True 300 | 301 | # If false, no module index is generated. 302 | # 303 | # latex_domain_indices = True 304 | 305 | 306 | # -- Options for manual page output --------------------------------------- 307 | 308 | # One entry per manual page. List of tuples 309 | # (source start file, name, description, authors, manual section). 310 | man_pages = [ 311 | (master_doc, 'pysyncobj', u'PySyncObj Documentation', 312 | [author], 1) 313 | ] 314 | 315 | # If true, show URL addresses after external links. 316 | # 317 | # man_show_urls = False 318 | 319 | 320 | # -- Options for Texinfo output ------------------------------------------- 321 | 322 | # Grouping the document tree into Texinfo files. List of tuples 323 | # (source start file, target name, title, author, 324 | # dir menu entry, description, category) 325 | texinfo_documents = [ 326 | (master_doc, 'PySyncObj', u'PySyncObj Documentation', 327 | author, 'PySyncObj', 'One line description of project.', 328 | 'Miscellaneous'), 329 | ] 330 | 331 | # Documents to append as an appendix to all manuals. 332 | # 333 | # texinfo_appendices = [] 334 | 335 | # If false, no module index is generated. 336 | # 337 | # texinfo_domain_indices = True 338 | 339 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 340 | # 341 | # texinfo_show_urls = 'footnote' 342 | 343 | # If true, do not generate a @detailmenu in the "Top" node's menu. 344 | # 345 | # texinfo_no_detailmenu = False 346 | 347 | autoclass_content = "both" 348 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | PySyncObj API documentation 2 | =========================== 3 | 4 | * The code is available on GitHub at `bakwc/PySyncObj`_ 5 | 6 | .. _bakwc/PySyncObj: https://github.com/bakwc/PySyncObj 7 | 8 | 9 | Contents: 10 | 11 | .. toctree:: 12 | :maxdepth: 2 13 | 14 | pysyncobj 15 | batteries 16 | 17 | 18 | Indices and tables 19 | ================== 20 | 21 | * :ref:`genindex` 22 | * :ref:`search` 23 | -------------------------------------------------------------------------------- /docs/source/pysyncobj.rst: -------------------------------------------------------------------------------- 1 | pysyncobj package 2 | ================= 3 | 4 | SyncObj 5 | ------- 6 | 7 | .. autoclass:: pysyncobj.SyncObj 8 | :members: 9 | 10 | replicated 11 | ---------- 12 | 13 | .. autofunction:: pysyncobj.replicated 14 | 15 | replicated_sync 16 | --------------- 17 | .. autofunction:: pysyncobj.replicated_sync 18 | 19 | SyncObjConf 20 | ----------- 21 | 22 | .. autoclass:: pysyncobj.SyncObjConf 23 | :members: 24 | 25 | FAIL_REASON 26 | ----------- 27 | 28 | .. autoclass:: pysyncobj.FAIL_REASON 29 | :members: 30 | 31 | 32 | SERIALIZER_STATE 33 | ---------------- 34 | 35 | .. autoclass:: pysyncobj.SERIALIZER_STATE 36 | :members: 37 | 38 | -------------------------------------------------------------------------------- /examples/counter.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import print_function 3 | 4 | import sys 5 | import time 6 | from functools import partial 7 | sys.path.append("../") 8 | from pysyncobj import SyncObj, replicated 9 | 10 | 11 | class TestObj(SyncObj): 12 | 13 | def __init__(self, selfNodeAddr, otherNodeAddrs): 14 | super(TestObj, self).__init__(selfNodeAddr, otherNodeAddrs) 15 | self.__counter = 0 16 | 17 | @replicated 18 | def incCounter(self): 19 | self.__counter += 1 20 | return self.__counter 21 | 22 | @replicated 23 | def addValue(self, value, cn): 24 | self.__counter += value 25 | return self.__counter, cn 26 | 27 | def getCounter(self): 28 | return self.__counter 29 | 30 | 31 | def onAdd(res, err, cnt): 32 | print('onAdd %d:' % cnt, res, err) 33 | 34 | if __name__ == '__main__': 35 | if len(sys.argv) < 3: 36 | print('Usage: %s self_port partner1_port partner2_port ...' % sys.argv[0]) 37 | sys.exit(-1) 38 | 39 | port = int(sys.argv[1]) 40 | partners = ['localhost:%d' % int(p) for p in sys.argv[2:]] 41 | o = TestObj('localhost:%d' % port, partners) 42 | n = 0 43 | old_value = -1 44 | while True: 45 | # time.sleep(0.005) 46 | time.sleep(0.5) 47 | if o.getCounter() != old_value: 48 | old_value = o.getCounter() 49 | print(old_value) 50 | if o._getLeader() is None: 51 | continue 52 | # if n < 2000: 53 | if n < 20: 54 | o.addValue(10, n, callback=partial(onAdd, cnt=n)) 55 | n += 1 56 | # if n % 200 == 0: 57 | # if True: 58 | # print('Counter value:', o.getCounter(), o._getLeader(), o._getRaftLogSize(), o._getLastCommitIndex()) 59 | -------------------------------------------------------------------------------- /examples/kvstorage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import print_function 3 | 4 | import sys 5 | sys.path.append("../") 6 | from pysyncobj import SyncObj, SyncObjConf, replicated 7 | 8 | 9 | class KVStorage(SyncObj): 10 | def __init__(self, selfAddress, partnerAddrs): 11 | cfg = SyncObjConf(dynamicMembershipChange = True) 12 | super(KVStorage, self).__init__(selfAddress, partnerAddrs, cfg) 13 | self.__data = {} 14 | 15 | @replicated 16 | def set(self, key, value): 17 | self.__data[key] = value 18 | 19 | @replicated 20 | def pop(self, key): 21 | self.__data.pop(key, None) 22 | 23 | def get(self, key): 24 | return self.__data.get(key, None) 25 | 26 | _g_kvstorage = None 27 | 28 | 29 | def main(): 30 | if len(sys.argv) < 2: 31 | print('Usage: %s selfHost:port partner1Host:port partner2Host:port ...') 32 | sys.exit(-1) 33 | 34 | selfAddr = sys.argv[1] 35 | if selfAddr == 'readonly': 36 | selfAddr = None 37 | partners = sys.argv[2:] 38 | 39 | global _g_kvstorage 40 | _g_kvstorage = KVStorage(selfAddr, partners) 41 | 42 | def get_input(v): 43 | if sys.version_info >= (3, 0): 44 | return input(v) 45 | else: 46 | return raw_input(v) 47 | 48 | while True: 49 | cmd = get_input(">> ").split() 50 | if not cmd: 51 | continue 52 | elif cmd[0] == 'set': 53 | _g_kvstorage.set(cmd[1], cmd[2]) 54 | elif cmd[0] == 'get': 55 | print(_g_kvstorage.get(cmd[1])) 56 | elif cmd[0] == 'pop': 57 | print(_g_kvstorage.pop(cmd[1])) 58 | else: 59 | print('Wrong command') 60 | 61 | if __name__ == '__main__': 62 | main() 63 | -------------------------------------------------------------------------------- /examples/kvstorage_http.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import print_function 3 | 4 | import sys 5 | try: 6 | from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer 7 | except ImportError: 8 | from http.server import BaseHTTPRequestHandler, HTTPServer 9 | sys.path.append("../") 10 | from pysyncobj import SyncObj, SyncObjConf, replicated 11 | 12 | 13 | class KVStorage(SyncObj): 14 | def __init__(self, selfAddress, partnerAddrs, dumpFile): 15 | conf = SyncObjConf( 16 | fullDumpFile=dumpFile, 17 | ) 18 | super(KVStorage, self).__init__(selfAddress, partnerAddrs, conf) 19 | self.__data = {} 20 | 21 | @replicated 22 | def set(self, key, value): 23 | self.__data[key] = value 24 | 25 | @replicated 26 | def pop(self, key): 27 | self.__data.pop(key, None) 28 | 29 | def get(self, key): 30 | return self.__data.get(key, None) 31 | 32 | _g_kvstorage = None 33 | 34 | 35 | class KVRequestHandler(BaseHTTPRequestHandler): 36 | def do_GET(self): 37 | try: 38 | value = _g_kvstorage.get(self.path) 39 | 40 | if value is None: 41 | self.send_response(404) 42 | self.send_header("Content-type", "text/plain") 43 | self.end_headers() 44 | return 45 | 46 | self.send_response(200) 47 | self.send_header("Content-type", "text/plain") 48 | self.end_headers() 49 | self.wfile.write(value.encode('utf-8')) 50 | except: 51 | pass 52 | 53 | def do_POST(self): 54 | try: 55 | key = self.path 56 | value = self.rfile.read(int(self.headers.get('content-length'))).decode('utf-8') 57 | _g_kvstorage.set(key, value) 58 | self.send_response(201) 59 | self.send_header("Content-type", "text/plain") 60 | self.end_headers() 61 | except: 62 | pass 63 | 64 | 65 | def main(): 66 | if len(sys.argv) < 5: 67 | print('Usage: %s http_port dump_file.bin selfHost:port partner1Host:port partner2Host:port ...' % sys.argv[0]) 68 | sys.exit(-1) 69 | 70 | httpPort = int(sys.argv[1]) 71 | dumpFile = sys.argv[2] 72 | selfAddr = sys.argv[3] 73 | partners = sys.argv[4:] 74 | 75 | global _g_kvstorage 76 | _g_kvstorage = KVStorage(selfAddr, partners, dumpFile) 77 | httpServer = HTTPServer(('', httpPort), KVRequestHandler) 78 | httpServer.serve_forever() 79 | 80 | 81 | if __name__ == '__main__': 82 | main() 83 | -------------------------------------------------------------------------------- /examples/lock.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from __future__ import print_function 3 | 4 | import sys 5 | import threading 6 | import weakref 7 | import time 8 | sys.path.append("../") 9 | from pysyncobj import SyncObj, replicated 10 | 11 | 12 | class LockImpl(SyncObj): 13 | def __init__(self, selfAddress, partnerAddrs, autoUnlockTime): 14 | super(LockImpl, self).__init__(selfAddress, partnerAddrs) 15 | self.__locks = {} 16 | self.__autoUnlockTime = autoUnlockTime 17 | 18 | @replicated 19 | def acquire(self, lockPath, clientID, currentTime): 20 | existingLock = self.__locks.get(lockPath, None) 21 | # Auto-unlock old lock 22 | if existingLock is not None: 23 | if currentTime - existingLock[1] > self.__autoUnlockTime: 24 | existingLock = None 25 | # Acquire lock if possible 26 | if existingLock is None or existingLock[0] == clientID: 27 | self.__locks[lockPath] = (clientID, currentTime) 28 | return True 29 | # Lock already acquired by someone else 30 | return False 31 | 32 | @replicated 33 | def ping(self, clientID, currentTime): 34 | for lockPath in self.__locks.keys(): 35 | lockClientID, lockTime = self.__locks[lockPath] 36 | 37 | if currentTime - lockTime > self.__autoUnlockTime: 38 | del self.__locks[lockPath] 39 | continue 40 | 41 | if lockClientID == clientID: 42 | self.__locks[lockPath] = (clientID, currentTime) 43 | 44 | @replicated 45 | def release(self, lockPath, clientID): 46 | existingLock = self.__locks.get(lockPath, None) 47 | if existingLock is not None and existingLock[0] == clientID: 48 | del self.__locks[lockPath] 49 | 50 | def isAcquired(self, lockPath, clientID, currentTime): 51 | existingLock = self.__locks.get(lockPath, None) 52 | if existingLock is not None: 53 | if existingLock[0] == clientID: 54 | if currentTime - existingLock[1] < self.__autoUnlockTime: 55 | return True 56 | return False 57 | 58 | 59 | class Lock(object): 60 | def __init__(self, selfAddress, partnerAddrs, autoUnlockTime): 61 | self.__lockImpl = LockImpl(selfAddress, partnerAddrs, autoUnlockTime) 62 | self.__selfID = selfAddress 63 | self.__autoUnlockTime = autoUnlockTime 64 | self.__mainThread = threading.current_thread() 65 | self.__initialised = threading.Event() 66 | self.__thread = threading.Thread(target=Lock._autoAcquireThread, args=(weakref.proxy(self),)) 67 | self.__thread.start() 68 | while not self.__initialised.is_set(): 69 | pass 70 | 71 | def _autoAcquireThread(self): 72 | self.__initialised.set() 73 | try: 74 | while True: 75 | if not self.__mainThread.is_alive(): 76 | break 77 | time.sleep(float(self.__autoUnlockTime) / 4.0) 78 | if self.__lockImpl._getLeader() is not None: 79 | self.__lockImpl.ping(self.__selfID, time.time()) 80 | except ReferenceError: 81 | pass 82 | 83 | def tryAcquireLock(self, path): 84 | self.__lockImpl.acquire(path, self.__selfID, time.time()) 85 | 86 | def isAcquired(self, path): 87 | return self.__lockImpl.isAcquired(path, self.__selfID, time.time()) 88 | 89 | def release(self, path): 90 | self.__lockImpl.release(path, self.__selfID) 91 | 92 | def printStatus(self): 93 | self.__lockImpl._printStatus() 94 | 95 | 96 | def printHelp(): 97 | print('') 98 | print(' Available commands:') 99 | print('') 100 | print('help print this help') 101 | print('check lockPath check if lock with lockPath path is ackquired or released') 102 | print('acquire lockPath try to ackquire lock with lockPath') 103 | print('release lockPath try to release lock with lockPath') 104 | print('') 105 | print('') 106 | 107 | 108 | def main(): 109 | if len(sys.argv) < 3: 110 | print('Usage: %s selfHost:port partner1Host:port partner2Host:port ...' % sys.argv[0]) 111 | sys.exit(-1) 112 | 113 | selfAddr = sys.argv[1] 114 | partners = sys.argv[2:] 115 | 116 | lock = Lock(selfAddr, partners, 10.0) 117 | 118 | def get_input(v): 119 | if sys.version_info >= (3, 0): 120 | return input(v) 121 | else: 122 | return raw_input(v) 123 | 124 | printHelp() 125 | while True: 126 | cmd = get_input(">> ").split() 127 | if not cmd: 128 | continue 129 | elif cmd[0] == 'help': 130 | printHelp() 131 | elif cmd[0] == 'check': 132 | print('acquired' if lock.isAcquired(cmd[1]) else 'released') 133 | elif cmd[0] == 'acquire': 134 | lock.tryAcquireLock(cmd[1]) 135 | time.sleep(1.5) 136 | print('acquired' if lock.isAcquired(cmd[1]) else 'failed') 137 | elif cmd[0] == 'release': 138 | lock.release(cmd[1]) 139 | time.sleep(1.5) 140 | print('acquired' if lock.isAcquired(cmd[1]) else 'released') 141 | 142 | 143 | if __name__ == '__main__': 144 | main() 145 | -------------------------------------------------------------------------------- /pysyncobj/__init__.py: -------------------------------------------------------------------------------- 1 | from .syncobj import SyncObj, SyncObjException, SyncObjConf, replicated, replicated_sync,\ 2 | FAIL_REASON, _COMMAND_TYPE, createJournal, HAS_CRYPTO, SERIALIZER_STATE, SyncObjConsumer, _RAFT_STATE 3 | from .utility import TcpUtility 4 | -------------------------------------------------------------------------------- /pysyncobj/atomic_replace.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import ctypes 4 | 5 | if hasattr(ctypes, 'windll'): # pragma: no cover 6 | CreateTransaction = ctypes.windll.ktmw32.CreateTransaction 7 | CommitTransaction = ctypes.windll.ktmw32.CommitTransaction 8 | MoveFileTransacted = ctypes.windll.kernel32.MoveFileTransactedW 9 | CloseHandle = ctypes.windll.kernel32.CloseHandle 10 | 11 | MOVEFILE_REPLACE_EXISTING = 0x1 12 | MOVEFILE_WRITE_THROUGH = 0x8 13 | 14 | if sys.version_info >= (3, 0): 15 | unicode = str 16 | 17 | def atomicReplace(oldPath, newPath): 18 | if not isinstance(oldPath, unicode): 19 | oldPath = unicode(oldPath, sys.getfilesystemencoding()) 20 | if not isinstance(newPath, unicode): 21 | newPath = unicode(newPath, sys.getfilesystemencoding()) 22 | ta = CreateTransaction(None, 0, 0, 0, 0, 1000, 'atomic_replace') 23 | if ta == -1: 24 | return False 25 | res = MoveFileTransacted(oldPath, newPath, None, None, MOVEFILE_REPLACE_EXISTING | MOVEFILE_WRITE_THROUGH, ta) 26 | if not res: 27 | CloseHandle(ta) 28 | return False 29 | res = CommitTransaction(ta) 30 | CloseHandle(ta) 31 | return bool(res) 32 | else: 33 | atomicReplace = os.rename 34 | -------------------------------------------------------------------------------- /pysyncobj/batteries.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import weakref 3 | import time 4 | import socket 5 | import os 6 | import collections 7 | import heapq 8 | from .syncobj import SyncObjConsumer, replicated 9 | 10 | 11 | class ReplCounter(SyncObjConsumer): 12 | def __init__(self): 13 | """ 14 | Simple distributed counter. You can set, add, sub and inc counter value. 15 | """ 16 | super(ReplCounter, self).__init__() 17 | self.__counter = int() 18 | 19 | @replicated 20 | def set(self, newValue): 21 | """ 22 | Set new value to a counter. 23 | 24 | :param newValue: new value 25 | :return: new counter value 26 | """ 27 | self.__counter = newValue 28 | return self.__counter 29 | 30 | @replicated 31 | def add(self, value): 32 | """ 33 | Adds value to a counter. 34 | 35 | :param value: value to add 36 | :return: new counter value 37 | """ 38 | self.__counter += value 39 | return self.__counter 40 | 41 | @replicated 42 | def sub(self, value): 43 | """ 44 | Subtracts a value from counter. 45 | 46 | :param value: value to subtract 47 | :return: new counter value 48 | """ 49 | self.__counter -= value 50 | return self.__counter 51 | 52 | @replicated 53 | def inc(self): 54 | """ 55 | Increments counter value by one. 56 | 57 | :return: new counter value 58 | """ 59 | self.__counter += 1 60 | return self.__counter 61 | 62 | def get(self): 63 | """ 64 | :return: current counter value 65 | """ 66 | return self.__counter 67 | 68 | 69 | class ReplList(SyncObjConsumer): 70 | def __init__(self): 71 | """ 72 | Distributed list - it has an interface similar to a regular list. 73 | """ 74 | super(ReplList, self).__init__() 75 | self.__data = [] 76 | 77 | @replicated 78 | def reset(self, newData): 79 | """Replace list with a new one""" 80 | assert isinstance(newData, list) 81 | self.__data = newData 82 | 83 | @replicated 84 | def set(self, position, newValue): 85 | """Update value at given position.""" 86 | self.__data[position] = newValue 87 | 88 | @replicated 89 | def append(self, item): 90 | """Append item to end""" 91 | self.__data.append(item) 92 | 93 | @replicated 94 | def extend(self, other): 95 | """Extend list by appending elements from the iterable""" 96 | self.__data.extend(other) 97 | 98 | @replicated 99 | def insert(self, position, element): 100 | """Insert object before position""" 101 | self.__data.insert(position, element) 102 | 103 | @replicated 104 | def remove(self, element): 105 | """ 106 | Remove first occurrence of element. 107 | Raises ValueError if the value is not present. 108 | """ 109 | self.__data.remove(element) 110 | 111 | @replicated 112 | def pop(self, position=None): 113 | """ 114 | Remove and return item at position (default last). 115 | Raises IndexError if list is empty or index is out of range. 116 | """ 117 | return self.__data.pop(position) 118 | 119 | @replicated 120 | def sort(self, reverse=False): 121 | """Stable sort *IN PLACE*""" 122 | self.__data.sort(reverse=reverse) 123 | 124 | def index(self, element): 125 | """ 126 | Return first position of element. 127 | Raises ValueError if the value is not present. 128 | """ 129 | return self.__data.index(element) 130 | 131 | def count(self, element): 132 | """ Return number of occurrences of element """ 133 | return self.__data.count(element) 134 | 135 | def get(self, position): 136 | """ Return value at given position""" 137 | return self.__data[position] 138 | 139 | def __getitem__(self, position): 140 | """ Return value at given position""" 141 | return self.__data[position] 142 | 143 | @replicated(ver=1) 144 | def __setitem__(self, position, element): 145 | """Update value at given position.""" 146 | self.__data[position] = element 147 | 148 | def __len__(self): 149 | """Return the number of items of a sequence or collection.""" 150 | return len(self.__data) 151 | 152 | def rawData(self): 153 | """Return internal list - use it carefully""" 154 | return self.__data 155 | 156 | 157 | class ReplDict(SyncObjConsumer): 158 | def __init__(self): 159 | """ 160 | Distributed dict - it has an interface similar to a regular dict. 161 | """ 162 | super(ReplDict, self).__init__() 163 | self.__data = {} 164 | 165 | @replicated 166 | def reset(self, newData): 167 | """Replace dict with a new one""" 168 | assert isinstance(newData, dict) 169 | self.__data = newData 170 | 171 | @replicated 172 | def __setitem__(self, key, value): 173 | """Set value for specified key""" 174 | self.__data[key] = value 175 | 176 | @replicated 177 | def set(self, key, value): 178 | """Set value for specified key""" 179 | self.__data[key] = value 180 | 181 | @replicated 182 | def setdefault(self, key, default): 183 | """Return value for specified key, set default value if key not exist""" 184 | return self.__data.setdefault(key, default) 185 | 186 | @replicated 187 | def update(self, other): 188 | """Adds all values from the other dict""" 189 | self.__data.update(other) 190 | 191 | @replicated 192 | def pop(self, key, default=None): 193 | """Remove and return value for given key, return default if key not exist""" 194 | return self.__data.pop(key, default) 195 | 196 | @replicated 197 | def clear(self): 198 | """Remove all items from dict""" 199 | self.__data.clear() 200 | 201 | def __getitem__(self, key): 202 | """Return value for given key""" 203 | return self.__data[key] 204 | 205 | def get(self, key, default=None): 206 | """Return value for given key, return default if key not exist""" 207 | return self.__data.get(key, default) 208 | 209 | def __len__(self): 210 | """Return size of dict""" 211 | return len(self.__data) 212 | 213 | def __contains__(self, key): 214 | """True if key exists""" 215 | return key in self.__data 216 | 217 | def keys(self): 218 | """Return all keys""" 219 | return self.__data.keys() 220 | 221 | def values(self): 222 | """Return all values""" 223 | return self.__data.values() 224 | 225 | def items(self): 226 | """Return all items""" 227 | return self.__data.items() 228 | 229 | def rawData(self): 230 | """Return internal dict - use it carefully""" 231 | return self.__data 232 | 233 | 234 | class ReplSet(SyncObjConsumer): 235 | def __init__(self): 236 | """ 237 | Distributed set - it has an interface similar to a regular set. 238 | """ 239 | super(ReplSet, self).__init__() 240 | self.__data = set() 241 | 242 | @replicated 243 | def reset(self, newData): 244 | """Replace set with a new one""" 245 | assert isinstance(newData, set) 246 | self.__data = newData 247 | 248 | @replicated 249 | def add(self, item): 250 | """Add an element to a set""" 251 | self.__data.add(item) 252 | 253 | @replicated 254 | def remove(self, item): 255 | """ 256 | Remove an element from a set; it must be a member. 257 | If the element is not a member, raise a KeyError. 258 | """ 259 | self.__data.remove(item) 260 | 261 | @replicated 262 | def discard(self, item): 263 | """ 264 | Remove an element from a set if it is a member. 265 | If the element is not a member, do nothing. 266 | """ 267 | self.__data.discard(item) 268 | 269 | @replicated 270 | def pop(self): 271 | """ 272 | Remove and return an arbitrary set element. 273 | Raises KeyError if the set is empty. 274 | """ 275 | return self.__data.pop() 276 | 277 | @replicated 278 | def clear(self): 279 | """ Remove all elements from this set. """ 280 | self.__data.clear() 281 | 282 | @replicated 283 | def update(self, other): 284 | """ Update a set with the union of itself and others. """ 285 | self.__data.update(other) 286 | 287 | def rawData(self): 288 | """Return internal dict - use it carefully""" 289 | return self.__data 290 | 291 | def __len__(self): 292 | """Return size of set""" 293 | return len(self.__data) 294 | 295 | def __contains__(self, item): 296 | """True if item exists""" 297 | return item in self.__data 298 | 299 | 300 | class ReplQueue(SyncObjConsumer): 301 | def __init__(self, maxsize=0): 302 | """ 303 | Replicated FIFO queue. Based on collections.deque. 304 | Has an interface similar to Queue. 305 | 306 | :param maxsize: Max queue size. 307 | :type maxsize: int 308 | """ 309 | super(ReplQueue, self).__init__() 310 | self.__maxsize = maxsize 311 | self.__data = collections.deque() 312 | 313 | def qsize(self): 314 | """Return size of queue""" 315 | return len(self.__data) 316 | 317 | def empty(self): 318 | """True if queue is empty""" 319 | return len(self.__data) == 0 320 | 321 | def __len__(self): 322 | """Return size of queue""" 323 | return len(self.__data) 324 | 325 | def full(self): 326 | """True if queue is full""" 327 | return len(self.__data) == self.__maxsize 328 | 329 | @replicated 330 | def put(self, item): 331 | """Put an item into the queue. 332 | True - if item placed in queue. 333 | False - if queue is full and item can not be placed.""" 334 | if self.__maxsize and len(self.__data) >= self.__maxsize: 335 | return False 336 | self.__data.append(item) 337 | return True 338 | 339 | @replicated 340 | def get(self, default=None): 341 | """Extract item from queue. 342 | Return default if queue is empty.""" 343 | try: 344 | return self.__data.popleft() 345 | except: 346 | return default 347 | 348 | 349 | class ReplPriorityQueue(SyncObjConsumer): 350 | def __init__(self, maxsize=0): 351 | """ 352 | Replicated priority queue. Based on heapq. 353 | Has an interface similar to Queue. 354 | 355 | :param maxsize: Max queue size. 356 | :type maxsize: int 357 | """ 358 | super(ReplPriorityQueue, self).__init__() 359 | self.__maxsize = maxsize 360 | self.__data = [] 361 | 362 | def qsize(self): 363 | """Return size of queue""" 364 | return len(self.__data) 365 | 366 | def empty(self): 367 | """True if queue is empty""" 368 | return len(self.__data) == 0 369 | 370 | def __len__(self): 371 | """Return size of queue""" 372 | return len(self.__data) 373 | 374 | def full(self): 375 | """True if queue is full""" 376 | return len(self.__data) == self.__maxsize 377 | 378 | @replicated 379 | def put(self, item): 380 | """Put an item into the queue. Items should be comparable, eg. tuples. 381 | True - if item placed in queue. 382 | False - if queue is full and item can not be placed.""" 383 | if self.__maxsize and len(self.__data) >= self.__maxsize: 384 | return False 385 | heapq.heappush(self.__data, item) 386 | return True 387 | 388 | @replicated 389 | def get(self, default=None): 390 | """Extract the smallest item from queue. 391 | Return default if queue is empty.""" 392 | if not self.__data: 393 | return default 394 | return heapq.heappop(self.__data) 395 | 396 | 397 | class _ReplLockManagerImpl(SyncObjConsumer): 398 | def __init__(self, autoUnlockTime): 399 | super(_ReplLockManagerImpl, self).__init__() 400 | self.__locks = {} 401 | self.__autoUnlockTime = autoUnlockTime 402 | 403 | @replicated 404 | def acquire(self, lockID, clientID, currentTime): 405 | existingLock = self.__locks.get(lockID, None) 406 | # Auto-unlock old lock 407 | if existingLock is not None: 408 | if currentTime - existingLock[1] > self.__autoUnlockTime: 409 | existingLock = None 410 | # Acquire lock if possible 411 | if existingLock is None or existingLock[0] == clientID: 412 | self.__locks[lockID] = (clientID, currentTime) 413 | return True 414 | # Lock already acquired by someone else 415 | return False 416 | 417 | @replicated 418 | def prolongate(self, clientID, currentTime): 419 | for lockID in list(self.__locks): 420 | lockClientID, lockTime = self.__locks[lockID] 421 | 422 | if currentTime - lockTime > self.__autoUnlockTime: 423 | del self.__locks[lockID] 424 | continue 425 | 426 | if lockClientID == clientID: 427 | self.__locks[lockID] = (clientID, currentTime) 428 | 429 | @replicated 430 | def release(self, lockID, clientID): 431 | existingLock = self.__locks.get(lockID, None) 432 | if existingLock is not None and existingLock[0] == clientID: 433 | del self.__locks[lockID] 434 | 435 | def isAcquired(self, lockID, clientID, currentTime): 436 | existingLock = self.__locks.get(lockID, None) 437 | if existingLock is not None: 438 | if existingLock[0] == clientID: 439 | if currentTime - existingLock[1] < self.__autoUnlockTime: 440 | return True 441 | return False 442 | 443 | 444 | class ReplLockManager(object): 445 | 446 | def __init__(self, autoUnlockTime, selfID = None): 447 | """Replicated Lock Manager. Allow to acquire / release distributed locks. 448 | 449 | :param autoUnlockTime: lock will be released automatically 450 | if no response from holder for more than autoUnlockTime seconds 451 | :type autoUnlockTime: float 452 | :param selfID: (optional) - unique id of current lock holder. 453 | :type selfID: str 454 | """ 455 | self.__lockImpl = _ReplLockManagerImpl(autoUnlockTime) 456 | if selfID is None: 457 | selfID = '%s:%d:%d' % (socket.gethostname(), os.getpid(), id(self)) 458 | self.__selfID = selfID 459 | self.__autoUnlockTime = autoUnlockTime 460 | self.__mainThread = threading.current_thread() 461 | self.__initialised = threading.Event() 462 | self.__destroying = False 463 | self.__lastProlongateTime = 0 464 | self.__thread = threading.Thread(target=ReplLockManager._autoAcquireThread, args=(weakref.proxy(self),)) 465 | self.__thread.start() 466 | while not self.__initialised.is_set(): 467 | pass 468 | 469 | def _consumer(self): 470 | return self.__lockImpl 471 | 472 | def destroy(self): 473 | """Destroy should be called before destroying ReplLockManager""" 474 | self.__destroying = True 475 | 476 | def _autoAcquireThread(self): 477 | self.__initialised.set() 478 | try: 479 | while True: 480 | if not self.__mainThread.is_alive(): 481 | break 482 | if self.__destroying: 483 | break 484 | time.sleep(0.1) 485 | if time.time() - self.__lastProlongateTime < float(self.__autoUnlockTime) / 4.0: 486 | continue 487 | syncObj = self.__lockImpl._syncObj 488 | if syncObj is None: 489 | continue 490 | if syncObj._getLeader() is not None: 491 | self.__lastProlongateTime = time.time() 492 | self.__lockImpl.prolongate(self.__selfID, time.time()) 493 | except ReferenceError: 494 | pass 495 | 496 | def tryAcquire(self, lockID, callback=None, sync=False, timeout=None): 497 | """Attempt to acquire lock. 498 | 499 | :param lockID: unique lock identifier. 500 | :type lockID: str 501 | :param sync: True - to wait until lock is acquired or failed to acquire. 502 | :type sync: bool 503 | :param callback: if sync is False - callback will be called with operation result. 504 | :type callback: func(opResult, error) 505 | :param timeout: max operation time (default - unlimited) 506 | :type timeout: float 507 | :return True if acquired, False - somebody else already acquired lock 508 | """ 509 | attemptTime = time.time() 510 | if sync: 511 | acquireRes = self.__lockImpl.acquire(lockID, self.__selfID, attemptTime, callback=callback, sync=sync, timeout=timeout) 512 | acquireTime = time.time() 513 | if acquireRes: 514 | if acquireTime - attemptTime > self.__autoUnlockTime / 2.0: 515 | acquireRes = False 516 | self.__lockImpl.release(lockID, self.__selfID, sync=sync) 517 | return acquireRes 518 | 519 | def asyncCallback(acquireRes, errCode): 520 | if acquireRes: 521 | acquireTime = time.time() 522 | if acquireTime - attemptTime > self.__autoUnlockTime / 2.0: 523 | acquireRes = False 524 | self.__lockImpl.release(lockID, self.__selfID, sync=False) 525 | callback(acquireRes, errCode) 526 | 527 | self.__lockImpl.acquire(lockID, self.__selfID, attemptTime, callback=asyncCallback, sync=sync, timeout=timeout) 528 | 529 | def isAcquired(self, lockID): 530 | """Check if lock is acquired by ourselves. 531 | 532 | :param lockID: unique lock identifier. 533 | :type lockID: str 534 | :return True if lock is acquired by ourselves. 535 | """ 536 | return self.__lockImpl.isAcquired(lockID, self.__selfID, time.time()) 537 | 538 | def release(self, lockID, callback=None, sync=False, timeout=None): 539 | """ 540 | Release previously-acquired lock. 541 | 542 | :param lockID: unique lock identifier. 543 | :type lockID: str 544 | :param sync: True - to wait until lock is released or failed to release. 545 | :type sync: bool 546 | :param callback: if sync is False - callback will be called with operation result. 547 | :type callback: func(opResult, error) 548 | :param timeout: max operation time (default - unlimited) 549 | :type timeout: float 550 | """ 551 | self.__lockImpl.release(lockID, self.__selfID, callback=callback, sync=sync, timeout=timeout) 552 | -------------------------------------------------------------------------------- /pysyncobj/config.py: -------------------------------------------------------------------------------- 1 | 2 | class FAIL_REASON: 3 | SUCCESS = 0 #: Command successfully applied. 4 | QUEUE_FULL = 1 #: Commands queue full 5 | MISSING_LEADER = 2 #: Leader is currently missing (leader election in progress, or no connection) 6 | DISCARDED = 3 #: Command discarded (cause of new leader elected and another command was applied instead) 7 | NOT_LEADER = 4 #: Leader has changed, old leader did not have time to commit command. 8 | LEADER_CHANGED = 5 #: Simmilar to NOT_LEADER - leader has changed without command commit. 9 | REQUEST_DENIED = 6 #: Command denied 10 | 11 | class SERIALIZER_STATE: 12 | NOT_SERIALIZING = 0 #: Serialization not started or already finished. 13 | SERIALIZING = 1 #: Serialization in progress. 14 | SUCCESS = 2 #: Serialization successfully finished (should be returned only one time after finished). 15 | FAILED = 3 #: Serialization failed (should be returned only one time after finished). 16 | 17 | class SyncObjConf(object): 18 | """PySyncObj configuration object""" 19 | 20 | def __init__(self, **kwargs): 21 | 22 | #: Encrypt session with specified password. 23 | #: Install `cryptography` module to be able to set password. 24 | self.password = kwargs.get('password', None) 25 | 26 | #: Disable autoTick if you want to call onTick manually. 27 | #: Otherwise it will be called automatically from separate thread. 28 | self.autoTick = kwargs.get('autoTick', True) 29 | self.autoTickPeriod = kwargs.get('autoTickPeriod', 0.05) 30 | 31 | #: Commands queue is used to store commands before real processing. 32 | self.commandsQueueSize = kwargs.get('commandsQueueSize', 100000) 33 | 34 | #: After randomly selected timeout (in range from minTimeout to maxTimeout) 35 | #: leader considered dead, and leader election starts. 36 | self.raftMinTimeout = kwargs.get('raftMinTimeout', 0.4) 37 | 38 | #: Same as raftMinTimeout 39 | self.raftMaxTimeout = kwargs.get('raftMaxTimeout', 1.4) 40 | 41 | #: Interval of sending append_entries (ping) command. 42 | #: Should be less than raftMinTimeout. 43 | self.appendEntriesPeriod = kwargs.get('appendEntriesPeriod', 0.1) 44 | 45 | #: When no data received for connectionTimeout - connection considered dead. 46 | #: Should be more than raftMaxTimeout. 47 | self.connectionTimeout = kwargs.get('connectionTimeout', 3.5) 48 | 49 | #: Interval between connection attempts. 50 | #: Will try to connect to offline nodes each connectionRetryTime. 51 | self.connectionRetryTime = kwargs.get('connectionRetryTime', 5.0) 52 | 53 | #: When leader has no response from the majority of the cluster 54 | #: for leaderFallbackTimeout - it will fallback to follower state. 55 | #: Should be more than appendEntriesPeriod. 56 | self.leaderFallbackTimeout = kwargs.get('leaderFallbackTimeout', 30.0) 57 | 58 | #: Send multiple entries in a single command. 59 | #: Enabled (default) - improve overall performance (requests per second) 60 | #: Disabled - improve single request speed (don't wait till batch ready) 61 | self.appendEntriesUseBatch = kwargs.get('appendEntriesUseBatch', True) 62 | 63 | #: Max number of bytes per single append_entries command. 64 | self.appendEntriesBatchSizeBytes = kwargs.get('appendEntriesBatchSizeBytes', 2 ** 16) 65 | 66 | #: Bind address (address:port). Default - None. 67 | #: If None - selfAddress is used as bindAddress. 68 | #: Could be useful if selfAddress is not equal to bindAddress. 69 | #: Eg. with routers, nat, port forwarding, etc. 70 | self.bindAddress = kwargs.get('bindAddress', None) 71 | 72 | #: Preferred address type. Default - ipv4. 73 | #: None - no preferences, select random available. 74 | #: ipv4 - prefer ipv4 address type, if not available us ipv6. 75 | #: ipv6 - prefer ipv6 address type, if not available us ipv4. 76 | self.preferredAddrType = kwargs.get('preferredAddrType', 'ipv4') 77 | 78 | #: Size of send buffer for sockets. 79 | self.sendBufferSize = kwargs.get('sendBufferSize', 2 ** 16) 80 | 81 | #: Size of receive for sockets. 82 | self.recvBufferSize = kwargs.get('recvBufferSize', 2 ** 16) 83 | 84 | #: Time to cache dns requests (improves performance, 85 | #: no need to resolve address for each connection attempt). 86 | self.dnsCacheTime = kwargs.get('dnsCacheTime', 600.0) 87 | 88 | #: Time to cache failed dns request. 89 | self.dnsFailCacheTime = kwargs.get('dnsFailCacheTime', 30.0) 90 | 91 | #: Log will be compacted after it reach minEntries size or 92 | #: minTime after previous compaction. 93 | self.logCompactionMinEntries = kwargs.get('logCompactionMinEntries', 5000) 94 | 95 | #: Log will be compacted after it reach minEntries size or 96 | #: minTime after previous compaction. 97 | self.logCompactionMinTime = kwargs.get('logCompactionMinTime', 300) 98 | 99 | #: If true - each node will start log compaction in separate time window. 100 | #: eg. node1 in 12.00-12.10, node2 in 12.10-12.20, node3 12.20 - 12.30, 101 | #: then again node1 12.30-12.40, node2 12.40-12.50, etc. 102 | self.logCompactionSplit = kwargs.get('logCompactionSplit', False) 103 | 104 | #: Max number of bytes per single append_entries command 105 | #: while sending serialized object. 106 | self.logCompactionBatchSize = kwargs.get('logCompactionBatchSize', 2 ** 16) 107 | 108 | #: If true - commands will be enqueued and executed after leader detected. 109 | #: Otherwise - `FAIL_REASON.MISSING_LEADER <#pysyncobj.FAIL_REASON.MISSING_LEADER>`_ error will be emitted. 110 | #: Leader is missing when esteblishing connection or when election in progress. 111 | self.commandsWaitLeader = kwargs.get('commandsWaitLeader', True) 112 | 113 | #: File to store full serialized object. Save full dump on disc when doing log compaction. 114 | #: None - to disable store. 115 | self.fullDumpFile = kwargs.get('fullDumpFile', None) 116 | 117 | #: File to store operations journal. Save each record as soon as received. 118 | self.journalFile = kwargs.get('journalFile', None) 119 | 120 | #: Will try to bind port every bindRetryTime seconds until success. 121 | self.bindRetryTime = kwargs.get('bindRetryTime', 1.0) 122 | 123 | #: Max number of attempts to bind port (default 0, unlimited). 124 | self.maxBindRetries = kwargs.get('maxBindRetries', 0) 125 | 126 | #: This callback will be called as soon as SyncObj sync all data from leader. 127 | self.onReady = kwargs.get('onReady', None) 128 | 129 | #: This callback will be called for every change of SyncObj state. 130 | #: Arguments: onStateChanged(oldState, newState). 131 | #: WARNING: there could be multiple leaders at the same time! 132 | self.onStateChanged = kwargs.get('onStateChanged', None) 133 | 134 | #: If enabled - cluster configuration could be changed dynamically. 135 | self.dynamicMembershipChange = kwargs.get('dynamicMembershipChange', False) 136 | 137 | #: Sockets poller: 138 | #: * `auto` - auto select best available on current platform 139 | #: * `select` - use select poller 140 | #: * `poll` - use poll poller 141 | self.pollerType = kwargs.get('pollerType', 'auto') 142 | 143 | #: Use fork if available when serializing on disk. 144 | self.useFork = kwargs.get('useFork', True) 145 | 146 | #: Custom serialize function, it will be called when logCompaction (fullDump) happens. 147 | #: If specified - there should be a custom deserializer too. 148 | #: Arguments: serializer(fileName, data) 149 | #: data - some internal stuff that is *required* to be serialized with your object data. 150 | self.serializer = kwargs.get('serializer', None) 151 | 152 | #: Check custom serialization state, for async serializer. 153 | #: Should return one of `SERIALIZER_STATE <#pysyncobj.SERIALIZER_STATE>`_. 154 | self.serializeChecker = kwargs.get('serializeChecker', None) 155 | 156 | #: Custom deserialize function, it will be called when restore from fullDump. 157 | #: If specified - there should be a custom serializer too. 158 | #: Should return data - internal stuff that was passed to serialize. 159 | self.deserializer = kwargs.get('deserializer', None) 160 | 161 | #: This callback will be called when cluster is switched to new version. 162 | #: onCodeVersionChanged(oldVer, newVer) 163 | self.onCodeVersionChanged = kwargs.get('onCodeVersionChanged', None) 164 | 165 | #: TCP socket keepalive 166 | #: (keepalive_time_seconds, probe_intervals_seconds, max_fails_count) 167 | #: Set to None to disable 168 | self.tcp_keepalive = kwargs.get('tcp_keepalive', (16, 3, 5)) 169 | 170 | def validate(self): 171 | assert self.autoTickPeriod > 0 172 | assert self.commandsQueueSize >= 0 173 | assert self.raftMinTimeout > self.appendEntriesPeriod * 3 174 | assert self.raftMaxTimeout > self.raftMinTimeout 175 | assert self.appendEntriesPeriod > 0 176 | assert self.leaderFallbackTimeout > self.appendEntriesPeriod 177 | assert self.connectionTimeout >= self.raftMaxTimeout 178 | assert self.connectionRetryTime >= 0 179 | assert self.appendEntriesBatchSizeBytes > 0 180 | assert self.sendBufferSize > 0 181 | assert self.recvBufferSize > 0 182 | assert self.dnsCacheTime>= 0 183 | assert self.dnsFailCacheTime >= 0 184 | assert self.logCompactionMinEntries >= 2 185 | assert self.logCompactionMinTime > 0 186 | assert self.logCompactionBatchSize > 0 187 | assert self.bindRetryTime > 0 188 | assert (self.deserializer is None) == (self.serializer is None) 189 | if self.serializer is not None: 190 | assert self.fullDumpFile is not None 191 | assert self.preferredAddrType in ('ipv4', 'ipv6', None) 192 | if self.tcp_keepalive is not None: 193 | assert isinstance(self.tcp_keepalive, tuple) 194 | assert len(self.tcp_keepalive) == 3 195 | for i in range(3): 196 | assert isinstance(self.tcp_keepalive[i], int) 197 | assert self.tcp_keepalive[i] > 0 198 | -------------------------------------------------------------------------------- /pysyncobj/dns_resolver.py: -------------------------------------------------------------------------------- 1 | import time 2 | import socket 3 | import random 4 | import logging 5 | from .monotonic import monotonic as monotonicTime 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class DnsCachingResolver(object): 11 | def __init__(self, cacheTime, failCacheTime): 12 | self.__cache = {} 13 | self.__cacheTime = cacheTime 14 | self.__failCacheTime = failCacheTime 15 | self.__preferredAddrFamily = socket.AF_INET 16 | 17 | def setTimeouts(self, cacheTime, failCacheTime): 18 | self.__cacheTime = cacheTime 19 | self.__failCacheTime = failCacheTime 20 | 21 | def resolve(self, hostname): 22 | currTime = monotonicTime() 23 | cachedTime, ips = self.__cache.get(hostname, (-self.__failCacheTime-1, [])) 24 | timePassed = currTime - cachedTime 25 | if (timePassed > self.__cacheTime) or (not ips and timePassed > self.__failCacheTime): 26 | prevIps = ips 27 | ips = self.__doResolve(hostname) 28 | if not ips: 29 | logger.warning("failed to resolve hostname: " + hostname) 30 | ips = prevIps 31 | self.__cache[hostname] = (currTime, ips) 32 | return None if not ips else random.choice(ips) 33 | 34 | def setPreferredAddrFamily(self, preferredAddrFamily): 35 | if preferredAddrFamily is None: 36 | self.__preferredAddrFamily = None 37 | elif preferredAddrFamily == 'ipv4': 38 | self.__preferredAddrFamily = socket.AF_INET 39 | elif preferredAddrFamily == 'ipv6': 40 | self.__preferredAddrFamily = socket.AF_INET 41 | else: 42 | self.__preferredAddrFamily = preferredAddrFamily 43 | 44 | def __doResolve(self, hostname): 45 | try: 46 | addrs = socket.getaddrinfo(hostname, None) 47 | ips = [] 48 | if self.__preferredAddrFamily is not None: 49 | ips = list(set([addr[4][0] for addr in addrs\ 50 | if addr[0] == self.__preferredAddrFamily])) 51 | if not ips: 52 | ips = list(set([addr[4][0] for addr in addrs])) 53 | except socket.gaierror: 54 | logger.warning('failed to resolve host %s', hostname) 55 | ips = [] 56 | return ips 57 | 58 | _g_resolver = None 59 | def globalDnsResolver(): 60 | global _g_resolver 61 | if _g_resolver is None: 62 | _g_resolver = DnsCachingResolver(cacheTime=600.0, failCacheTime=30.0) 63 | return _g_resolver 64 | -------------------------------------------------------------------------------- /pysyncobj/encryptor.py: -------------------------------------------------------------------------------- 1 | import base64 2 | try: 3 | import cryptography 4 | from cryptography.fernet import Fernet 5 | from cryptography.hazmat.backends import default_backend 6 | from cryptography.hazmat.primitives import hashes 7 | from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC 8 | HAS_CRYPTO = True 9 | except: 10 | HAS_CRYPTO = False 11 | 12 | SALT = b'\x15%q\xe6\xbb\x02\xa6\xf8\x13q\x90\xcf6+\x1e\xeb' 13 | 14 | def getEncryptor(password): 15 | if not isinstance(password, bytes): 16 | password = bytes(password.encode()) 17 | kdf = PBKDF2HMAC( 18 | algorithm=hashes.SHA256(), 19 | length=32, 20 | salt=SALT, 21 | iterations=100000, 22 | backend=default_backend() 23 | ) 24 | key = base64.urlsafe_b64encode(kdf.derive(password)) 25 | return Fernet(key) 26 | -------------------------------------------------------------------------------- /pysyncobj/fast_queue.py: -------------------------------------------------------------------------------- 1 | try: 2 | import Queue 3 | except ImportError: 4 | import queue as Queue 5 | from collections import deque 6 | import threading 7 | 8 | # According to benchmarks, standard Queue is slow. 9 | # Using FastQueue improves overall performance by ~15% 10 | class FastQueue(object): 11 | def __init__(self, maxSize): 12 | self.__queue = deque() 13 | self.__lock = threading.Lock() 14 | self.__maxSize = maxSize 15 | 16 | def put_nowait(self, value): 17 | with self.__lock: 18 | if len(self.__queue) > self.__maxSize: 19 | raise Queue.Full() 20 | self.__queue.append(value) 21 | 22 | def get_nowait(self): 23 | with self.__lock: 24 | if len(self.__queue) == 0: 25 | raise Queue.Empty() 26 | return self.__queue.popleft() 27 | -------------------------------------------------------------------------------- /pysyncobj/journal.py: -------------------------------------------------------------------------------- 1 | import os 2 | import mmap 3 | import struct 4 | import shutil 5 | 6 | from .version import VERSION 7 | from .pickle import to_bytes, loads, dumps 8 | 9 | class Journal(object): 10 | 11 | def add(self, command, idx, term): 12 | raise NotImplementedError 13 | 14 | def clear(self): 15 | raise NotImplementedError 16 | 17 | def deleteEntriesFrom(self, entryFrom): 18 | raise NotImplementedError 19 | 20 | def deleteEntriesTo(self, entryTo): 21 | raise NotImplementedError 22 | 23 | def __getitem__(self, item): 24 | raise NotImplementedError 25 | 26 | def __len__(self): 27 | raise NotImplementedError 28 | 29 | def _destroy(self): 30 | raise NotImplementedError 31 | 32 | def setRaftCommitIndex(self, raftCommitIndex): 33 | raise NotImplementedError 34 | 35 | def getRaftCommitIndex(self): 36 | raise NotImplementedError 37 | 38 | def onOneSecondTimer(self): 39 | pass 40 | 41 | 42 | class MemoryJournal(Journal): 43 | 44 | def __init__(self): 45 | self.__journal = [] 46 | self.__bytesSize = 0 47 | self.__lastCommitIndex = 0 48 | 49 | def add(self, command, idx, term): 50 | self.__journal.append((command, idx, term)) 51 | 52 | def clear(self): 53 | self.__journal = [] 54 | 55 | def deleteEntriesFrom(self, entryFrom): 56 | del self.__journal[entryFrom:] 57 | 58 | def deleteEntriesTo(self, entryTo): 59 | self.__journal = self.__journal[entryTo:] 60 | 61 | def __getitem__(self, item): 62 | return self.__journal[item] 63 | 64 | def __len__(self): 65 | return len(self.__journal) 66 | 67 | def _destroy(self): 68 | pass 69 | 70 | def setRaftCommitIndex(self, raftCommitIndex): 71 | pass 72 | 73 | def getRaftCommitIndex(self): 74 | return 1 75 | 76 | 77 | 78 | class ResizableFile(object): 79 | 80 | def __init__(self, fileName, initialSize = 1024, resizeFactor = 2.0, defaultContent = None): 81 | self.__fileName = fileName 82 | self.__resizeFactor = resizeFactor 83 | if not os.path.exists(fileName): 84 | with open(fileName, 'wb') as f: 85 | if defaultContent is not None: 86 | f.write(defaultContent) 87 | self.__f = open(fileName, 'r+b') 88 | self.__mm = mmap.mmap(self.__f.fileno(), 0) 89 | currSize = self.__mm.size() 90 | if currSize < initialSize: 91 | try: 92 | self.__mm.resize(initialSize) 93 | except SystemError: 94 | self.__extand(initialSize - currSize) 95 | 96 | def write(self, offset, values): 97 | size = len(values) 98 | currSize = self.__mm.size() 99 | if offset + size > self.__mm.size(): 100 | try: 101 | self.__mm.resize(int(self.__mm.size() * self.__resizeFactor)) 102 | except SystemError: 103 | self.__extand(int(self.__mm.size() * self.__resizeFactor) - currSize) 104 | self.__mm[offset:offset + size] = values 105 | 106 | def read(self, offset, size): 107 | return self.__mm[offset:offset + size] 108 | 109 | def __extand(self, bytesToAdd): 110 | self.__mm.close() 111 | self.__f.close() 112 | with open(self.__fileName, 'ab') as f: 113 | f.write(b'\0' * bytesToAdd) 114 | self.__f = open(self.__fileName, 'r+b') 115 | self.__mm = mmap.mmap(self.__f.fileno(), 0) 116 | 117 | def _destroy(self): 118 | self.__mm.flush() 119 | self.__mm.close() 120 | self.__f.close() 121 | 122 | def flush(self): 123 | self.__mm.flush() 124 | 125 | 126 | class MetaStorer(object): 127 | def __init__(self, path): 128 | self.__path = path 129 | 130 | def getMeta(self): 131 | meta = {} 132 | try: 133 | meta = loads(open(self.__path, 'rb').read()) 134 | except: 135 | pass 136 | return meta 137 | 138 | def storeMeta(self, meta): 139 | with open(self.__path + '.tmp', 'wb') as f: 140 | f.write(dumps(meta)) 141 | f.flush() 142 | shutil.move(self.__path + '.tmp', self.__path) 143 | 144 | def getPath(self): 145 | return self.__path 146 | 147 | 148 | JOURNAL_FORMAT_VERSION = 1 149 | APP_NAME = b'PYSYNCOBJ' 150 | APP_VERSION = str.encode(VERSION) 151 | 152 | NAME_SIZE = 24 153 | VERSION_SIZE = 8 154 | assert len(APP_NAME) < NAME_SIZE 155 | assert len(APP_VERSION) < VERSION_SIZE 156 | FIRST_RECORD_OFFSET = NAME_SIZE + VERSION_SIZE + 4 + 4 157 | LAST_RECORD_OFFSET_OFFSET = NAME_SIZE + VERSION_SIZE + 4 158 | 159 | # 160 | # APP_NAME (24b) + APP_VERSION (8b) + FORMAT_VERSION (4b) + LAST_RECORD_OFFSET (4b) + 161 | # record1size + record1 + record1size + record2size + record2 + record2size + ... 162 | # (record1) | (record2) | ... 163 | # 164 | 165 | class FileJournal(Journal): 166 | 167 | def __init__(self, journalFile): 168 | self.__journalFile = ResizableFile(journalFile, defaultContent=self.__getDefaultHeader()) 169 | self.__journal = [] 170 | self.__metaStorer = MetaStorer(journalFile + '.meta') 171 | self.__meta = self.__metaStorer.getMeta() 172 | self.__metaSaved = True 173 | currentOffset = FIRST_RECORD_OFFSET 174 | lastRecordOffset = self.__getLastRecordOffset() 175 | while currentOffset < lastRecordOffset: 176 | nextRecordSize = struct.unpack(' 21 | Licensed under the Apache License, Version 2.0 (the "License"); 22 | you may not use this file except in compliance with the License. 23 | You may obtain a copy of the License at 24 | http://www.apache.org/licenses/LICENSE-2.0 25 | Unless required by applicable law or agreed to in writing, software 26 | distributed under the License is distributed on an "AS IS" BASIS, 27 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 28 | See the License for the specific language governing permissions and 29 | limitations under the License. 30 | """ 31 | import time 32 | 33 | 34 | __all__ = ('monotonic',) 35 | 36 | 37 | try: 38 | time.CLOCK_MONOTONIC_RAW 39 | time.clock_gettime(time.CLOCK_MONOTONIC_RAW) 40 | monotonic = lambda: time.clock_gettime(time.CLOCK_MONOTONIC_RAW) 41 | except AttributeError: 42 | import ctypes 43 | import ctypes.util 44 | import os 45 | import sys 46 | import threading 47 | try: 48 | if sys.platform == 'darwin': # OS X, iOS 49 | # See Technical Q&A QA1398 of the Mac Developer Library: 50 | # 51 | libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True) 52 | 53 | class mach_timebase_info_data_t(ctypes.Structure): 54 | """System timebase info. Defined in .""" 55 | _fields_ = (('numer', ctypes.c_uint32), 56 | ('denom', ctypes.c_uint32)) 57 | 58 | mach_absolute_time = libc.mach_absolute_time 59 | mach_absolute_time.restype = ctypes.c_uint64 60 | 61 | timebase = mach_timebase_info_data_t() 62 | libc.mach_timebase_info(ctypes.byref(timebase)) 63 | ticks_per_second = timebase.numer / timebase.denom * 1.0e9 64 | 65 | def monotonic(): 66 | """Monotonic clock, cannot go backward.""" 67 | return mach_absolute_time() / ticks_per_second 68 | 69 | elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'): 70 | if sys.platform.startswith('cygwin'): 71 | # Note: cygwin implements clock_gettime (CLOCK_MONOTONIC = 4) since 72 | # version 1.7.6. Using raw WinAPI for maximum version compatibility. 73 | 74 | # Ugly hack using the wrong calling convention (in 32-bit mode) 75 | # because ctypes has no windll under cygwin (and it also seems that 76 | # the code letting you select stdcall in _ctypes doesn't exist under 77 | # the preprocessor definitions relevant to cygwin). 78 | # This is 'safe' because: 79 | # 1. The ABI of GetTickCount and GetTickCount64 is identical for 80 | # both calling conventions because they both have no parameters. 81 | # 2. libffi masks the problem because after making the call it doesn't 82 | # touch anything through esp and epilogue code restores a correct 83 | # esp from ebp afterwards. 84 | try: 85 | kernel32 = ctypes.cdll.kernel32 86 | except OSError: # 'No such file or directory' 87 | kernel32 = ctypes.cdll.LoadLibrary('kernel32.dll') 88 | else: 89 | kernel32 = ctypes.windll.kernel32 90 | 91 | GetTickCount64 = getattr(kernel32, 'GetTickCount64', None) 92 | if GetTickCount64: 93 | # Windows Vista / Windows Server 2008 or newer. 94 | GetTickCount64.restype = ctypes.c_ulonglong 95 | 96 | def monotonic(): 97 | """Monotonic clock, cannot go backward.""" 98 | return GetTickCount64() / 1000.0 99 | 100 | else: 101 | # Before Windows Vista. 102 | GetTickCount = kernel32.GetTickCount 103 | GetTickCount.restype = ctypes.c_uint32 104 | 105 | get_tick_count_lock = threading.Lock() 106 | get_tick_count_last_sample = 0 107 | get_tick_count_wraparounds = 0 108 | 109 | def monotonic(): 110 | """Monotonic clock, cannot go backward.""" 111 | global get_tick_count_last_sample 112 | global get_tick_count_wraparounds 113 | 114 | with get_tick_count_lock: 115 | current_sample = GetTickCount() 116 | if current_sample < get_tick_count_last_sample: 117 | get_tick_count_wraparounds += 1 118 | get_tick_count_last_sample = current_sample 119 | 120 | final_milliseconds = get_tick_count_wraparounds << 32 121 | final_milliseconds += get_tick_count_last_sample 122 | return final_milliseconds / 1000.0 123 | 124 | else: 125 | try: 126 | clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'), 127 | use_errno=True).clock_gettime 128 | except Exception: 129 | clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'), 130 | use_errno=True).clock_gettime 131 | 132 | class timespec(ctypes.Structure): 133 | """Time specification, as described in clock_gettime(3).""" 134 | _fields_ = (('tv_sec', ctypes.c_long), 135 | ('tv_nsec', ctypes.c_long)) 136 | 137 | if sys.platform.startswith('linux'): 138 | CLOCK_MONOTONIC = 4 # actually this is CLOCK_MONOTONIC_RAW 139 | elif sys.platform.startswith('freebsd'): 140 | CLOCK_MONOTONIC = 4 141 | elif sys.platform.startswith('sunos5'): 142 | CLOCK_MONOTONIC = 4 143 | elif 'bsd' in sys.platform: 144 | CLOCK_MONOTONIC = 3 145 | elif sys.platform.startswith('aix'): 146 | CLOCK_MONOTONIC = ctypes.c_longlong(10) 147 | 148 | def monotonic(): 149 | """Monotonic clock, cannot go backward.""" 150 | ts = timespec() 151 | if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts)): 152 | errno = ctypes.get_errno() 153 | raise OSError(errno, os.strerror(errno)) 154 | return ts.tv_sec + ts.tv_nsec / 1.0e9 155 | 156 | # Perform a sanity-check. 157 | if monotonic() - monotonic() > 0: 158 | raise ValueError('monotonic() is not monotonic!') 159 | 160 | except Exception as e: 161 | monotonic = lambda: time.time() 162 | 163 | -------------------------------------------------------------------------------- /pysyncobj/node.py: -------------------------------------------------------------------------------- 1 | from .dns_resolver import globalDnsResolver 2 | 3 | 4 | class Node(object): 5 | """ 6 | A representation of any node in the network. 7 | 8 | The ID must uniquely identify a node. Node objects with the same ID will be treated as equal, i.e. as representing the same node. 9 | """ 10 | 11 | def __init__(self, id, **kwargs): 12 | """ 13 | Initialise the Node; id must be immutable, hashable, and unique. 14 | 15 | :param id: unique, immutable, hashable ID of a node 16 | :type id: any 17 | :param **kwargs: any further information that should be kept about this node 18 | """ 19 | 20 | self._id = id 21 | for key in kwargs: 22 | setattr(self, key, kwargs[key]) 23 | 24 | def __setattr__(self, name, value): 25 | if name == 'id': 26 | raise AttributeError('Node id is not mutable') 27 | super(Node, self).__setattr__(name, value) 28 | 29 | def __eq__(self, other): 30 | return isinstance(other, Node) and self.id == other.id 31 | 32 | def __ne__(self, other): 33 | # In Python 3, __ne__ defaults to inverting the result of __eq__. 34 | # Python 2 isn't as sane. So for Python 2 compatibility, we also need to define the != operator explicitly. 35 | return not (self == other) 36 | 37 | def __hash__(self): 38 | return hash(self.id) 39 | 40 | def __str__(self): 41 | return self.id 42 | 43 | def __repr__(self): 44 | v = vars(self) 45 | return '{}({}{})'.format(type(self).__name__, repr(self.id), (', ' + ', '.join('{} = {}'.format(key, repr(v[key])) for key in v if key != '_id')) if len(v) > 1 else '') 46 | 47 | def _destroy(self): 48 | pass 49 | @property 50 | def id(self): 51 | return self._id 52 | 53 | 54 | class TCPNode(Node): 55 | """ 56 | A node intended for communication over TCP/IP. Its id is the network address (host:port). 57 | """ 58 | 59 | def __init__(self, address, **kwargs): 60 | """ 61 | Initialise the TCPNode 62 | 63 | :param address: network address of the node in the format 'host:port' 64 | :type address: str 65 | :param **kwargs: any further information that should be kept about this node 66 | """ 67 | 68 | super(TCPNode, self).__init__(address, **kwargs) 69 | self.__address = address 70 | self.__host, port = address.rsplit(':', 1) 71 | self.__port = int(port) 72 | #self.__ip = globalDnsResolver().resolve(self.host) 73 | 74 | @property 75 | def address(self): 76 | return self.__address 77 | 78 | @property 79 | def host(self): 80 | return self.__host 81 | 82 | @property 83 | def port(self): 84 | return self.__port 85 | 86 | @property 87 | def ip(self): 88 | return globalDnsResolver().resolve(self.__host) 89 | 90 | def __repr__(self): 91 | v = vars(self) 92 | filtered = ['_id', '_TCPNode__address', '_TCPNode__host', '_TCPNode__port', '_TCPNode__ip'] 93 | formatted = ['{} = {}'.format(key, repr(v[key])) for key in v if key not in filtered] 94 | return '{}({}{})'.format(type(self).__name__, repr(self.id), (', ' + ', '.join(formatted)) if len(formatted) else '') 95 | -------------------------------------------------------------------------------- /pysyncobj/pickle.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | is_py3 = sys.version_info >= (3, 0) 4 | 5 | if is_py3: 6 | import pickle 7 | from struct import unpack 8 | 9 | # python3 sometimes failes to unpickle data pickled by python2, it happens 10 | # because it is trying to decode binary data into the string and fails. 11 | # UnicodeDecodeError exception is raised in this case. Instead of simply 12 | # giving up we will retry decoding with with "slow" _Unpickler implemented 13 | # in pure python with overriden following methods. 14 | # The main idea is - treat object as binary if the decoding has failed. 15 | # Such approach will not affect performance when we run all nodes with 16 | # the same python version, beacuse it will never retry. 17 | def _load_short_binstring(self): 18 | len = ord(self.read(1)) 19 | data = self.read(len) 20 | try: 21 | data = str(data, self.encoding, self.errors) 22 | except: 23 | pass 24 | self.append(data) 25 | 26 | def _load_binstring(self): 27 | len, = unpack(' 0 and port < 65536) 13 | return True 14 | except: 15 | return False 16 | 17 | 18 | def executeAdminCommand(args): 19 | parser = ArgumentParser() 20 | parser.add_argument('-conn', action='store', dest='connection', help='address to connect') 21 | parser.add_argument('-pass', action='store', dest='password', help='cluster\'s password') 22 | parser.add_argument('-status', action='store_true', help='send command \'status\'') 23 | parser.add_argument('-add', action='store', dest='add', help='send command \'add\'') 24 | parser.add_argument('-remove', action='store', dest='remove', help='send command \'remove\'') 25 | parser.add_argument('-set_version', action='store', dest='version', type=int, help='set cluster code version') 26 | 27 | data = parser.parse_args(args) 28 | if not checkCorrectAddress(data.connection): 29 | return 'invalid address to connect' 30 | 31 | if data.status: 32 | message = ['status'] 33 | elif data.add: 34 | if not checkCorrectAddress(data.add): 35 | return 'invalid address to command add' 36 | message = ['add', data.add] 37 | elif data.remove: 38 | if not checkCorrectAddress(data.remove): 39 | return 'invalid address to command remove' 40 | message = ['remove', data.remove] 41 | elif data.version is not None: 42 | message = ['set_version', data.version] 43 | else: 44 | return 'invalid command' 45 | 46 | util = TcpUtility(data.password) 47 | try: 48 | result = util.executeCommand(data.connection, message) 49 | except UtilityException as e: 50 | return str(e) 51 | 52 | if isinstance(result, str): 53 | return result 54 | if isinstance(result, dict): 55 | return '\n'.join('%s: %s' % (k, v) for k, v in sorted(result.items())) 56 | return str(result) 57 | 58 | 59 | def main(args=None): 60 | if args is None: 61 | args = sys.argv[1:] 62 | 63 | result = executeAdminCommand(args) 64 | sys.stdout.write(result) 65 | sys.stdout.write(os.linesep) 66 | 67 | 68 | if __name__ == '__main__': 69 | main() 70 | -------------------------------------------------------------------------------- /pysyncobj/tcp_connection.py: -------------------------------------------------------------------------------- 1 | import time 2 | import socket 3 | from sys import platform 4 | import zlib 5 | import struct 6 | 7 | import pysyncobj.pickle as pickle 8 | import pysyncobj.win_inet_pton 9 | 10 | from .poller import POLL_EVENT_TYPE 11 | from .monotonic import monotonic as monotonicTime 12 | 13 | 14 | class CONNECTION_STATE: 15 | DISCONNECTED = 0 16 | CONNECTING = 1 17 | CONNECTED = 2 18 | 19 | def _getAddrType(addr): 20 | try: 21 | socket.inet_aton(addr) 22 | return socket.AF_INET 23 | except socket.error: 24 | pass 25 | try: 26 | socket.inet_pton(socket.AF_INET6, addr) 27 | return socket.AF_INET6 28 | except socket.error: 29 | pass 30 | raise Exception('unknown address type') 31 | 32 | import socket 33 | 34 | def set_keepalive_linux(sock, after_idle_sec=1, interval_sec=3, max_fails=5): 35 | sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) 36 | sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, after_idle_sec) 37 | sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, interval_sec) 38 | sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, max_fails) 39 | 40 | def set_keepalive_osx(sock, after_idle_sec=1, interval_sec=3, max_fails=5): 41 | TCP_KEEPALIVE = 0x10 42 | sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) 43 | sock.setsockopt(socket.IPPROTO_TCP, TCP_KEEPALIVE, interval_sec) 44 | 45 | def set_keepalive_windows(sock, after_idle_sec=1, interval_sec=3, max_fails=5): 46 | sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, after_idle_sec * 1000, interval_sec * 1000)) 47 | 48 | def set_keepalive(sock, after_idle_sec=1, interval_sec=3, max_fails=5): 49 | if platform == "linux" or platform == "linux2": 50 | set_keepalive_linux(sock, after_idle_sec, interval_sec, max_fails) 51 | elif platform == "darwin": 52 | set_keepalive_osx(sock, after_idle_sec, interval_sec, max_fails) 53 | elif platform == "win32": 54 | set_keepalive_windows(sock, after_idle_sec, interval_sec, max_fails) 55 | 56 | 57 | class TcpConnection(object): 58 | 59 | def __init__(self, poller, onMessageReceived = None, onConnected = None, onDisconnected = None, 60 | socket=None, timeout=10.0, sendBufferSize = 2 ** 13, recvBufferSize = 2 ** 13, 61 | keepalive=None): 62 | self.sendRandKey = None 63 | self.recvRandKey = None 64 | self.recvLastTimestamp = 0 65 | self.encryptor = None 66 | 67 | self.__socket = socket 68 | self.__readBuffer = bytes() 69 | self.__writeBuffer = bytes() 70 | self.__lastReadTime = monotonicTime() 71 | self.__timeout = timeout 72 | self.__poller = poller 73 | self.__keepalive = keepalive 74 | if socket is not None: 75 | self.__socket = socket 76 | self.__fileno = socket.fileno() 77 | self.__state = CONNECTION_STATE.CONNECTED 78 | self.setSockoptKeepalive() 79 | self.__poller.subscribe(self.__fileno, 80 | self.__processConnection, 81 | POLL_EVENT_TYPE.READ | POLL_EVENT_TYPE.WRITE | POLL_EVENT_TYPE.ERROR) 82 | else: 83 | self.__state = CONNECTION_STATE.DISCONNECTED 84 | self.__fileno = None 85 | self.__socket = None 86 | 87 | self.__onMessageReceived = onMessageReceived 88 | self.__onConnected = onConnected 89 | self.__onDisconnected = onDisconnected 90 | self.__sendBufferSize = sendBufferSize 91 | self.__recvBufferSize = recvBufferSize 92 | 93 | def setSockoptKeepalive(self): 94 | if self.__socket is None: 95 | return 96 | if self.__keepalive is None: 97 | return 98 | set_keepalive( 99 | self.__socket, 100 | self.__keepalive[0], 101 | self.__keepalive[1], 102 | self.__keepalive[2], 103 | ) 104 | 105 | def setOnConnectedCallback(self, onConnected): 106 | self.__onConnected = onConnected 107 | 108 | def setOnMessageReceivedCallback(self, onMessageReceived): 109 | self.__onMessageReceived = onMessageReceived 110 | 111 | def setOnDisconnectedCallback(self, onDisconnected): 112 | self.__onDisconnected = onDisconnected 113 | 114 | def connect(self, host, port): 115 | if host is None: 116 | return False 117 | self.__state = CONNECTION_STATE.DISCONNECTED 118 | self.__fileno = None 119 | self.__socket = socket.socket(_getAddrType(host), socket.SOCK_STREAM) 120 | self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.__sendBufferSize) 121 | self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.__recvBufferSize) 122 | self.__socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) 123 | self.setSockoptKeepalive() 124 | self.__socket.setblocking(0) 125 | self.__readBuffer = bytes() 126 | self.__writeBuffer = bytes() 127 | self.__lastReadTime = monotonicTime() 128 | 129 | try: 130 | self.__socket.connect((host, port)) 131 | except socket.error as e: 132 | if e.errno not in (socket.errno.EINPROGRESS, socket.errno.EWOULDBLOCK): 133 | return False 134 | self.__fileno = self.__socket.fileno() 135 | self.__state = CONNECTION_STATE.CONNECTING 136 | self.__poller.subscribe(self.__fileno, 137 | self.__processConnection, 138 | POLL_EVENT_TYPE.READ | POLL_EVENT_TYPE.WRITE | POLL_EVENT_TYPE.ERROR) 139 | return True 140 | 141 | def send(self, message): 142 | if self.sendRandKey: 143 | message = (self.sendRandKey, message) 144 | data = zlib.compress(pickle.dumps(message), 3) 145 | if self.encryptor: 146 | data = self.encryptor.encrypt_at_time(data, int(monotonicTime())) 147 | data = struct.pack('i', len(data)) + data 148 | self.__writeBuffer += data 149 | self.__trySendBuffer() 150 | 151 | def fileno(self): 152 | return self.__fileno 153 | 154 | def disconnect(self): 155 | needCallDisconnect = False 156 | if self.__onDisconnected is not None and self.__state != CONNECTION_STATE.DISCONNECTED: 157 | needCallDisconnect = True 158 | self.sendRandKey = None 159 | self.recvRandKey = None 160 | self.recvLastTimestamp = 0 161 | if self.__socket is not None: 162 | self.__socket.close() 163 | self.__socket = None 164 | if self.__fileno is not None: 165 | self.__poller.unsubscribe(self.__fileno) 166 | self.__fileno = None 167 | self.__writeBuffer = bytes() 168 | self.__readBuffer = bytes() 169 | self.__state = CONNECTION_STATE.DISCONNECTED 170 | if needCallDisconnect: 171 | self.__onDisconnected() 172 | 173 | def getSendBufferSize(self): 174 | return len(self.__writeBuffer) 175 | 176 | def __processConnection(self, descr, eventType): 177 | poller = self.__poller 178 | if descr != self.__fileno: 179 | poller.unsubscribe(descr) 180 | return 181 | 182 | if eventType & POLL_EVENT_TYPE.ERROR: 183 | self.disconnect() 184 | return 185 | 186 | self.__processConnectionTimeout() 187 | if self.state == CONNECTION_STATE.DISCONNECTED: 188 | return 189 | 190 | if eventType & POLL_EVENT_TYPE.READ or eventType & POLL_EVENT_TYPE.WRITE: 191 | if self.__socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR): 192 | self.disconnect() 193 | return 194 | 195 | if self.__state == CONNECTION_STATE.CONNECTING: 196 | if self.__onConnected is not None: 197 | self.__onConnected() 198 | if self.__state == CONNECTION_STATE.DISCONNECTED: 199 | return 200 | self.__state = CONNECTION_STATE.CONNECTED 201 | self.__lastReadTime = monotonicTime() 202 | return 203 | 204 | if eventType & POLL_EVENT_TYPE.WRITE: 205 | self.__trySendBuffer() 206 | if self.__state == CONNECTION_STATE.DISCONNECTED: 207 | return 208 | event = POLL_EVENT_TYPE.READ | POLL_EVENT_TYPE.ERROR 209 | if len(self.__writeBuffer) > 0: 210 | event |= POLL_EVENT_TYPE.WRITE 211 | poller.subscribe(descr, self.__processConnection, event) 212 | 213 | if eventType & POLL_EVENT_TYPE.READ: 214 | self.__tryReadBuffer() 215 | if self.__state == CONNECTION_STATE.DISCONNECTED: 216 | return 217 | 218 | while True: 219 | message = self.__processParseMessage() 220 | if message is None: 221 | break 222 | if self.__onMessageReceived is not None: 223 | self.__onMessageReceived(message) 224 | if self.__state == CONNECTION_STATE.DISCONNECTED: 225 | return 226 | 227 | def __processConnectionTimeout(self): 228 | if monotonicTime() - self.__lastReadTime > self.__timeout: 229 | self.disconnect() 230 | return 231 | 232 | def __trySendBuffer(self): 233 | self.__processConnectionTimeout() 234 | if self.state == CONNECTION_STATE.DISCONNECTED: 235 | return 236 | while self.__processSend(): 237 | pass 238 | 239 | def __processSend(self): 240 | if not self.__writeBuffer: 241 | return False 242 | try: 243 | res = self.__socket.send(self.__writeBuffer) 244 | if res < 0: 245 | self.disconnect() 246 | return False 247 | if res == 0: 248 | return False 249 | self.__writeBuffer = self.__writeBuffer[res:] 250 | return True 251 | except socket.error as e: 252 | if e.errno not in (socket.errno.EAGAIN, socket.errno.EWOULDBLOCK): 253 | self.disconnect() 254 | return False 255 | 256 | def __tryReadBuffer(self): 257 | while self.__processRead(): 258 | pass 259 | self.__lastReadTime = monotonicTime() 260 | 261 | def __processRead(self): 262 | try: 263 | incoming = self.__socket.recv(self.__recvBufferSize) 264 | except socket.error as e: 265 | if e.errno not in (socket.errno.EAGAIN, socket.errno.EWOULDBLOCK): 266 | self.disconnect() 267 | return False 268 | if self.__socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR): 269 | self.disconnect() 270 | return False 271 | if not incoming: 272 | self.disconnect() 273 | return False 274 | self.__readBuffer += incoming 275 | return True 276 | 277 | def __processParseMessage(self): 278 | if len(self.__readBuffer) < 4: 279 | return None 280 | l = struct.unpack('i', self.__readBuffer[:4])[0] 281 | if len(self.__readBuffer) - 4 < l: 282 | return None 283 | data = self.__readBuffer[4:4 + l] 284 | try: 285 | if self.encryptor: 286 | dataTimestamp = self.encryptor.extract_timestamp(data) 287 | assert dataTimestamp >= self.recvLastTimestamp 288 | self.recvLastTimestamp = dataTimestamp 289 | # Unfortunately we can't get a timestamp and data in one go 290 | data = self.encryptor.decrypt(data) 291 | message = pickle.loads(zlib.decompress(data)) 292 | if self.recvRandKey: 293 | randKey, message = message 294 | assert randKey == self.recvRandKey 295 | except: 296 | # Why no logging of security errors? 297 | self.disconnect() 298 | return None 299 | self.__readBuffer = self.__readBuffer[4 + l:] 300 | return message 301 | 302 | @property 303 | def state(self): 304 | return self.__state 305 | -------------------------------------------------------------------------------- /pysyncobj/tcp_server.py: -------------------------------------------------------------------------------- 1 | import socket 2 | 3 | from .poller import POLL_EVENT_TYPE 4 | from .tcp_connection import TcpConnection, _getAddrType 5 | 6 | 7 | class SERVER_STATE: 8 | UNBINDED = 0, 9 | BINDED = 1 10 | 11 | 12 | class TcpServer(object): 13 | 14 | def __init__( 15 | self, poller, host, port, onNewConnection, 16 | sendBufferSize = 2 ** 13, 17 | recvBufferSize = 2 ** 13, 18 | connectionTimeout = 3.5, 19 | keepalive = None, 20 | ): 21 | self.__poller = poller 22 | self.__host = host 23 | self.__port = int(port) 24 | self.__hostAddrType = _getAddrType(host) 25 | self.__sendBufferSize = sendBufferSize 26 | self.__recvBufferSize = recvBufferSize 27 | self.__socket = None 28 | self.__fileno = None 29 | self.__keepalive = keepalive 30 | self.__state = SERVER_STATE.UNBINDED 31 | self.__onNewConnectionCallback = onNewConnection 32 | self.__connectionTimeout = connectionTimeout 33 | 34 | def bind(self): 35 | self.__socket = socket.socket(self.__hostAddrType, socket.SOCK_STREAM) 36 | self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.__sendBufferSize) 37 | self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.__recvBufferSize) 38 | self.__socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) 39 | self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 40 | self.__socket.setblocking(0) 41 | self.__socket.bind((self.__host, self.__port)) 42 | self.__socket.listen(5) 43 | self.__fileno = self.__socket.fileno() 44 | self.__poller.subscribe(self.__fileno, 45 | self.__onNewConnection, 46 | POLL_EVENT_TYPE.READ | POLL_EVENT_TYPE.ERROR) 47 | self.__state = SERVER_STATE.BINDED 48 | 49 | def unbind(self): 50 | self.__state = SERVER_STATE.UNBINDED 51 | if self.__fileno is not None: 52 | self.__poller.unsubscribe(self.__fileno) 53 | self.__fileno = None 54 | if self.__socket is not None: 55 | self.__socket.close() 56 | 57 | def __onNewConnection(self, descr, event): 58 | if event & POLL_EVENT_TYPE.READ: 59 | try: 60 | sock, addr = self.__socket.accept() 61 | sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.__sendBufferSize) 62 | sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.__recvBufferSize) 63 | sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) 64 | sock.setblocking(0) 65 | conn = TcpConnection( 66 | poller=self.__poller, 67 | socket=sock, 68 | timeout=self.__connectionTimeout, 69 | sendBufferSize=self.__sendBufferSize, 70 | recvBufferSize=self.__recvBufferSize, 71 | keepalive=self.__keepalive, 72 | ) 73 | self.__onNewConnectionCallback(conn) 74 | except socket.error as e: 75 | if e.errno not in (socket.errno.EAGAIN, socket.errno.EWOULDBLOCK): 76 | self.unbind() 77 | return 78 | 79 | if event & POLL_EVENT_TYPE.ERROR: 80 | self.unbind() 81 | return 82 | -------------------------------------------------------------------------------- /pysyncobj/transport.py: -------------------------------------------------------------------------------- 1 | from .config import FAIL_REASON 2 | from .dns_resolver import globalDnsResolver 3 | from .monotonic import monotonic as monotonicTime 4 | from .node import Node, TCPNode 5 | from .tcp_connection import TcpConnection, CONNECTION_STATE 6 | from .tcp_server import TcpServer 7 | import functools 8 | import os 9 | import threading 10 | import time 11 | import random 12 | 13 | 14 | class TransportNotReadyError(Exception): 15 | """Transport failed to get ready for operation.""" 16 | 17 | 18 | class Transport(object): 19 | """Base class for implementing a transport between PySyncObj nodes""" 20 | 21 | def __init__(self, syncObj, selfNode, otherNodes): 22 | """ 23 | Initialise the transport 24 | 25 | :param syncObj: SyncObj 26 | :type syncObj: SyncObj 27 | :param selfNode: current server node, or None if this is a read-only node 28 | :type selfNode: Node or None 29 | :param otherNodes: partner nodes 30 | :type otherNodes: list of Node 31 | """ 32 | 33 | self._onMessageReceivedCallback = None 34 | self._onNodeConnectedCallback = None 35 | self._onNodeDisconnectedCallback = None 36 | self._onReadonlyNodeConnectedCallback = None 37 | self._onReadonlyNodeDisconnectedCallback = None 38 | self._onUtilityMessageCallbacks = {} 39 | 40 | def setOnMessageReceivedCallback(self, callback): 41 | """ 42 | Set the callback for when a message is received, or disable callback by passing None 43 | 44 | :param callback callback 45 | :type callback function(node: Node, message: any) or None 46 | """ 47 | 48 | self._onMessageReceivedCallback = callback 49 | 50 | def setOnNodeConnectedCallback(self, callback): 51 | """ 52 | Set the callback for when the connection to a (non-read-only) node is established, or disable callback by passing None 53 | 54 | :param callback callback 55 | :type callback function(node: Node) or None 56 | """ 57 | 58 | self._onNodeConnectedCallback = callback 59 | 60 | def setOnNodeDisconnectedCallback(self, callback): 61 | """ 62 | Set the callback for when the connection to a (non-read-only) node is terminated or is considered dead, or disable callback by passing None 63 | 64 | :param callback callback 65 | :type callback function(node: Node) or None 66 | """ 67 | 68 | self._onNodeDisconnectedCallback = callback 69 | 70 | def setOnReadonlyNodeConnectedCallback(self, callback): 71 | """ 72 | Set the callback for when a read-only node connects, or disable callback by passing None 73 | 74 | :param callback callback 75 | :type callback function(node: Node) or None 76 | """ 77 | 78 | self._onReadonlyNodeConnectedCallback = callback 79 | 80 | def setOnReadonlyNodeDisconnectedCallback(self, callback): 81 | """ 82 | Set the callback for when a read-only node disconnects (or the connection is lost), or disable callback by passing None 83 | 84 | :param callback callback 85 | :type callback function(node: Node) or None 86 | """ 87 | 88 | self._onReadonlyNodeDisconnectedCallback = callback 89 | 90 | def setOnUtilityMessageCallback(self, message, callback): 91 | """ 92 | Set the callback for when an utility message is received, or disable callback by passing None 93 | 94 | :param message: the utility message string (add, remove, set_version, and so on) 95 | :type message: str 96 | :param callback: callback 97 | :type callback: function(message: list, callback: function) or None 98 | """ 99 | 100 | if callback: 101 | self._onUtilityMessageCallbacks[message] = callback 102 | elif message in self._onUtilityMessageCallbacks: 103 | del self._onUtilityMessageCallbacks[message] 104 | 105 | # Helper functions so you don't need to check for the callbacks manually in subclasses 106 | def _onMessageReceived(self, node, message): 107 | if self._onMessageReceivedCallback is not None: 108 | self._onMessageReceivedCallback(node, message) 109 | 110 | def _onNodeConnected(self, node): 111 | if self._onNodeConnectedCallback is not None: 112 | self._onNodeConnectedCallback(node) 113 | 114 | def _onNodeDisconnected(self, node): 115 | if self._onNodeDisconnectedCallback is not None: 116 | self._onNodeDisconnectedCallback(node) 117 | 118 | def _onReadonlyNodeConnected(self, node): 119 | if self._onReadonlyNodeConnectedCallback is not None: 120 | self._onReadonlyNodeConnectedCallback(node) 121 | 122 | def _onReadonlyNodeDisconnected(self, node): 123 | if self._onReadonlyNodeDisconnectedCallback is not None: 124 | self._onReadonlyNodeDisconnectedCallback(node) 125 | 126 | def tryGetReady(self): 127 | """ 128 | Try to get the transport ready for operation. This may for example mean binding a server to a port. 129 | 130 | :raises TransportNotReadyError: if the transport fails to get ready for operation 131 | """ 132 | 133 | @property 134 | def ready(self): 135 | """ 136 | Whether the transport is ready for operation. 137 | 138 | :rtype bool 139 | """ 140 | 141 | return True 142 | 143 | def waitReady(self): 144 | """ 145 | Wait for the transport to be ready. 146 | 147 | :raises TransportNotReadyError: if the transport fails to get ready for operation 148 | """ 149 | 150 | def addNode(self, node): 151 | """ 152 | Add a node to the network 153 | 154 | :param node node to add 155 | :type node Node 156 | """ 157 | 158 | def dropNode(self, node): 159 | """ 160 | Remove a node from the network (meaning connections, buffers, etc. related to this node can be dropped) 161 | 162 | :param node node to drop 163 | :type node Node 164 | """ 165 | 166 | def send(self, node, message): 167 | """ 168 | Send a message to a node. 169 | The message should be picklable. 170 | The return value signifies whether the message is thought to have been sent successfully. It does not necessarily mean that the message actually arrived at the node. 171 | 172 | :param node target node 173 | :type node Node 174 | :param message message 175 | :type message any 176 | :returns success 177 | :rtype bool 178 | """ 179 | 180 | raise NotImplementedError 181 | 182 | def destroy(self): 183 | """ 184 | Destroy the transport 185 | """ 186 | 187 | 188 | class TCPTransport(Transport): 189 | def __init__(self, syncObj, selfNode, otherNodes): 190 | """ 191 | Initialise the TCP transport. On normal (non-read-only) nodes, this will start a TCP server. On all nodes, it will initiate relevant connections to other nodes. 192 | 193 | :param syncObj: SyncObj 194 | :type syncObj: SyncObj 195 | :param selfNode: current node (None if this is a read-only node) 196 | :type selfNode: TCPNode or None 197 | :param otherNodes: partner nodes 198 | :type otherNodes: iterable of TCPNode 199 | """ 200 | 201 | super(TCPTransport, self).__init__(syncObj, selfNode, otherNodes) 202 | self._syncObj = syncObj 203 | self._server = None 204 | self._connections = {} # Node object -> TcpConnection object 205 | self._unknownConnections = set() # set of TcpConnection objects 206 | self._selfNode = selfNode 207 | self._selfIsReadonlyNode = selfNode is None 208 | self._nodes = set() # set of TCPNode 209 | self._readonlyNodes = set() # set of Node 210 | self._nodeAddrToNode = {} # node ID/address -> TCPNode (does not include read-only nodes) 211 | self._lastConnectAttempt = {} # TPCNode -> float (seconds since epoch) 212 | self._preventConnectNodes = set() # set of TCPNode to which no (re)connection should be triggered on _connectIfNecessary; used via dropNode and destroy to cleanly remove a node 213 | self._readonlyNodesCounter = 0 214 | self._lastBindAttemptTime = 0 215 | self._bindAttempts = 0 216 | self._bindOverEvent = threading.Event() # gets triggered either when the server has either been bound correctly or when the number of bind attempts exceeds the config value maxBindRetries 217 | self._ready = False 218 | self._send_random_sleep_duration = 0 219 | 220 | self._syncObj.addOnTickCallback(self._onTick) 221 | 222 | for node in otherNodes: 223 | self.addNode(node) 224 | 225 | if not self._selfIsReadonlyNode: 226 | self._createServer() 227 | else: 228 | self._ready = True 229 | 230 | def _connToNode(self, conn): 231 | """ 232 | Find the node to which a connection belongs. 233 | 234 | :param conn: connection object 235 | :type conn: TcpConnection 236 | :returns corresponding node or None if the node cannot be found 237 | :rtype Node or None 238 | """ 239 | 240 | for node in self._connections: 241 | if self._connections[node] is conn: 242 | return node 243 | return None 244 | 245 | def tryGetReady(self): 246 | """ 247 | Try to bind the server if necessary. 248 | 249 | :raises TransportNotReadyError if the server could not be bound 250 | """ 251 | 252 | self._maybeBind() 253 | 254 | @property 255 | def ready(self): 256 | return self._ready 257 | 258 | def _createServer(self): 259 | """ 260 | Create the TCP server (but don't bind yet) 261 | """ 262 | 263 | conf = self._syncObj.conf 264 | bindAddr = conf.bindAddress 265 | seflAddr = getattr(self._selfNode, 'address') 266 | if bindAddr is not None: 267 | host, port = bindAddr.rsplit(':', 1) 268 | elif seflAddr is not None: 269 | host, port = seflAddr.rsplit(':', 1) 270 | if ':' in host: 271 | host = '::' 272 | else: 273 | host = '0.0.0.0' 274 | else: 275 | raise RuntimeError('Unable to determine bind address') 276 | 277 | if host != '0.0.0.0': 278 | host = globalDnsResolver().resolve(host) 279 | self._server = TcpServer(self._syncObj._poller, host, port, onNewConnection = self._onNewIncomingConnection, 280 | sendBufferSize = conf.sendBufferSize, 281 | recvBufferSize = conf.recvBufferSize, 282 | connectionTimeout = conf.connectionTimeout) 283 | 284 | def _maybeBind(self): 285 | """ 286 | Bind the server unless it is already bound, this is a read-only node, or the last attempt was too recently. 287 | 288 | :raises TransportNotReadyError if the bind attempt fails 289 | """ 290 | 291 | if self._ready or self._selfIsReadonlyNode or monotonicTime() < self._lastBindAttemptTime + self._syncObj.conf.bindRetryTime: 292 | return 293 | self._lastBindAttemptTime = monotonicTime() 294 | try: 295 | self._server.bind() 296 | except Exception as e: 297 | self._bindAttempts += 1 298 | if self._syncObj.conf.maxBindRetries and self._bindAttempts >= self._syncObj.conf.maxBindRetries: 299 | self._bindOverEvent.set() 300 | raise TransportNotReadyError 301 | else: 302 | self._ready = True 303 | self._bindOverEvent.set() 304 | 305 | def _onTick(self): 306 | """ 307 | Tick callback. Binds the server and connects to other nodes as necessary. 308 | """ 309 | 310 | try: 311 | self._maybeBind() 312 | except TransportNotReadyError: 313 | pass 314 | self._connectIfNecessary() 315 | 316 | def _onNewIncomingConnection(self, conn): 317 | """ 318 | Callback for connections initiated by the other side 319 | 320 | :param conn: connection object 321 | :type conn: TcpConnection 322 | """ 323 | 324 | self._unknownConnections.add(conn) 325 | encryptor = self._syncObj.encryptor 326 | if encryptor: 327 | conn.encryptor = encryptor 328 | conn.setOnMessageReceivedCallback(functools.partial(self._onIncomingMessageReceived, conn)) 329 | conn.setOnDisconnectedCallback(functools.partial(self._onDisconnected, conn)) 330 | 331 | def _onIncomingMessageReceived(self, conn, message): 332 | """ 333 | Callback for initial messages on incoming connections. Handles encryption, utility messages, and association of the connection with a Node. 334 | Once this initial setup is done, the relevant connected callback is executed, and further messages are deferred to the onMessageReceived callback. 335 | 336 | :param conn: connection object 337 | :type conn: TcpConnection 338 | :param message: received message 339 | :type message: any 340 | """ 341 | 342 | if self._syncObj.encryptor and not conn.sendRandKey: 343 | conn.sendRandKey = message 344 | conn.recvRandKey = os.urandom(32) 345 | conn.send(conn.recvRandKey) 346 | return 347 | 348 | # Utility messages 349 | if isinstance(message, list) and self._onUtilityMessage(conn, message): 350 | return 351 | 352 | # At this point, message should be either a node ID (i.e. address) or 'readonly' 353 | node = self._nodeAddrToNode[message] if message in self._nodeAddrToNode else None 354 | 355 | if node is None and message != 'readonly': 356 | conn.disconnect() 357 | self._unknownConnections.discard(conn) 358 | return 359 | 360 | readonly = node is None 361 | if readonly: 362 | nodeId = str(self._readonlyNodesCounter) 363 | node = Node(nodeId) 364 | self._readonlyNodes.add(node) 365 | self._readonlyNodesCounter += 1 366 | 367 | self._unknownConnections.discard(conn) 368 | self._connections[node] = conn 369 | conn.setOnMessageReceivedCallback(functools.partial(self._onMessageReceived, node)) 370 | if not readonly: 371 | self._onNodeConnected(node) 372 | else: 373 | self._onReadonlyNodeConnected(node) 374 | 375 | def _onUtilityMessage(self, conn, message): 376 | command = message[0] 377 | if command in self._onUtilityMessageCallbacks: 378 | message[0] = command.upper() 379 | callback = functools.partial(self._utilityCallback, conn = conn, args = message) 380 | try: 381 | self._onUtilityMessageCallbacks[command](message[1:], callback) 382 | except Exception as e: 383 | conn.send(str(e)) 384 | return True 385 | 386 | def _utilityCallback(self, res, err, conn, args): 387 | """ 388 | Callback for the utility messages 389 | 390 | :param res: result of the command 391 | :param err: error code (one of pysyncobj.config.FAIL_REASON) 392 | :param conn: utility connection 393 | :param args: command with arguments 394 | """ 395 | 396 | if not (err is None and res): 397 | cmdResult = 'SUCCESS' if err == FAIL_REASON.SUCCESS else 'FAIL' 398 | res = ' '.join(map(str, [cmdResult] + args)) 399 | conn.send(res) 400 | 401 | def _shouldConnect(self, node): 402 | """ 403 | Check whether this node should initiate a connection to another node 404 | 405 | :param node: the other node 406 | :type node: Node 407 | """ 408 | 409 | return isinstance(node, TCPNode) and node not in self._preventConnectNodes and (self._selfIsReadonlyNode or self._selfNode.address > node.address) 410 | 411 | def _connectIfNecessarySingle(self, node): 412 | """ 413 | Connect to a node if necessary. 414 | 415 | :param node: node to connect to 416 | :type node: Node 417 | """ 418 | 419 | if node in self._connections and self._connections[node].state != CONNECTION_STATE.DISCONNECTED: 420 | return True 421 | if not self._shouldConnect(node): 422 | return False 423 | assert node in self._connections # Since we "should connect" to this node, there should always be a connection object already in place. 424 | if node in self._lastConnectAttempt and monotonicTime() - self._lastConnectAttempt[node] < self._syncObj.conf.connectionRetryTime: 425 | return False 426 | self._lastConnectAttempt[node] = monotonicTime() 427 | return self._connections[node].connect(node.ip, node.port) 428 | 429 | def _connectIfNecessary(self): 430 | """ 431 | Connect to all nodes as necessary. 432 | """ 433 | 434 | for node in self._nodes: 435 | self._connectIfNecessarySingle(node) 436 | 437 | def _sendSelfAddress(self, conn): 438 | if self._selfIsReadonlyNode: 439 | conn.send('readonly') 440 | else: 441 | conn.send(self._selfNode.address) 442 | 443 | def _onOutgoingConnected(self, conn): 444 | """ 445 | Callback for when a new connection from this to another node is established. Handles encryption and informs the other node which node this is. 446 | If encryption is disabled, this triggers the onNodeConnected callback and messages are deferred to the onMessageReceived callback. 447 | If encryption is enabled, the first message is handled by _onOutgoingMessageReceived. 448 | 449 | :param conn: connection object 450 | :type conn: TcpConnection 451 | """ 452 | 453 | if self._syncObj.encryptor: 454 | conn.setOnMessageReceivedCallback(functools.partial(self._onOutgoingMessageReceived, conn)) # So we can process the sendRandKey 455 | conn.recvRandKey = os.urandom(32) 456 | conn.send(conn.recvRandKey) 457 | else: 458 | self._sendSelfAddress(conn) 459 | # The onMessageReceived callback is configured in addNode already. 460 | self._onNodeConnected(self._connToNode(conn)) 461 | 462 | def _onOutgoingMessageReceived(self, conn, message): 463 | """ 464 | Callback for receiving a message on a new outgoing connection. Used only if encryption is enabled to exchange the random keys. 465 | Once the key exchange is done, this triggers the onNodeConnected callback, and further messages are deferred to the onMessageReceived callback. 466 | 467 | :param conn: connection object 468 | :type conn: TcpConnection 469 | :param message: received message 470 | :type message: any 471 | """ 472 | 473 | if not conn.sendRandKey: 474 | conn.sendRandKey = message 475 | self._sendSelfAddress(conn) 476 | 477 | node = self._connToNode(conn) 478 | conn.setOnMessageReceivedCallback(functools.partial(self._onMessageReceived, node)) 479 | self._onNodeConnected(node) 480 | 481 | def _onDisconnected(self, conn): 482 | """ 483 | Callback for when a connection is terminated or considered dead. Initiates a reconnect if necessary. 484 | 485 | :param conn: connection object 486 | :type conn: TcpConnection 487 | """ 488 | 489 | self._unknownConnections.discard(conn) 490 | node = self._connToNode(conn) 491 | if node is not None: 492 | if node in self._nodes: 493 | self._onNodeDisconnected(node) 494 | self._connectIfNecessarySingle(node) 495 | else: 496 | self._readonlyNodes.discard(node) 497 | self._onReadonlyNodeDisconnected(node) 498 | 499 | def waitReady(self): 500 | """ 501 | Wait for the TCP transport to become ready for operation, i.e. the server to be bound. 502 | This method should be called from a different thread than used for the SyncObj ticks. 503 | 504 | :raises TransportNotReadyError: if the number of bind tries exceeds the configured limit 505 | """ 506 | 507 | self._bindOverEvent.wait() 508 | if not self._ready: 509 | raise TransportNotReadyError 510 | 511 | def addNode(self, node): 512 | """ 513 | Add a node to the network 514 | 515 | :param node: node to add 516 | :type node: TCPNode 517 | """ 518 | 519 | self._nodes.add(node) 520 | self._nodeAddrToNode[node.address] = node 521 | if self._shouldConnect(node): 522 | conn = TcpConnection( 523 | poller = self._syncObj._poller, 524 | timeout = self._syncObj.conf.connectionTimeout, 525 | sendBufferSize = self._syncObj.conf.sendBufferSize, 526 | recvBufferSize = self._syncObj.conf.recvBufferSize, 527 | keepalive = self._syncObj.conf.tcp_keepalive, 528 | ) 529 | conn.encryptor = self._syncObj.encryptor 530 | conn.setOnConnectedCallback(functools.partial(self._onOutgoingConnected, conn)) 531 | conn.setOnMessageReceivedCallback(functools.partial(self._onMessageReceived, node)) 532 | conn.setOnDisconnectedCallback(functools.partial(self._onDisconnected, conn)) 533 | self._connections[node] = conn 534 | 535 | def dropNode(self, node): 536 | """ 537 | Drop a node from the network 538 | 539 | :param node: node to drop 540 | :type node: Node 541 | """ 542 | 543 | conn = self._connections.pop(node, None) 544 | if conn is not None: 545 | # Calling conn.disconnect() immediately triggers the onDisconnected callback if the connection isn't already disconnected, so this is necessary to prevent the automatic reconnect. 546 | self._preventConnectNodes.add(node) 547 | conn.disconnect() 548 | self._preventConnectNodes.remove(node) 549 | if isinstance(node, TCPNode): 550 | self._nodes.discard(node) 551 | self._nodeAddrToNode.pop(node.address, None) 552 | else: 553 | self._readonlyNodes.discard(node) 554 | self._lastConnectAttempt.pop(node, None) 555 | 556 | def send(self, node, message): 557 | """ 558 | Send a message to a node. Returns False if the connection appears to be dead either before or after actually trying to send the message. 559 | 560 | :param node: target node 561 | :type node: Node 562 | :param message: message 563 | :param message: any 564 | :returns success 565 | :rtype bool 566 | """ 567 | 568 | if node not in self._connections or self._connections[node].state != CONNECTION_STATE.CONNECTED: 569 | return False 570 | if self._send_random_sleep_duration: 571 | time.sleep(random.random() * self._send_random_sleep_duration) 572 | self._connections[node].send(message) 573 | if self._connections[node].state != CONNECTION_STATE.CONNECTED: 574 | return False 575 | return True 576 | 577 | def destroy(self): 578 | """ 579 | Destroy this transport 580 | """ 581 | 582 | self.setOnMessageReceivedCallback(None) 583 | self.setOnNodeConnectedCallback(None) 584 | self.setOnNodeDisconnectedCallback(None) 585 | self.setOnReadonlyNodeConnectedCallback(None) 586 | self.setOnReadonlyNodeDisconnectedCallback(None) 587 | for node in self._nodes | self._readonlyNodes: 588 | self.dropNode(node) 589 | if self._server is not None: 590 | self._server.unbind() 591 | for conn in list(self._unknownConnections): 592 | conn.disconnect() 593 | self._unknownConnections = set() 594 | -------------------------------------------------------------------------------- /pysyncobj/utility.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | 4 | from .encryptor import getEncryptor 5 | from .node import Node, TCPNode 6 | from .poller import createPoller 7 | from .tcp_connection import TcpConnection 8 | 9 | 10 | class UtilityException(Exception): 11 | pass 12 | 13 | 14 | class Utility(object): 15 | 16 | def __init__(self, password=None, timeout=900.0): 17 | """ 18 | Initialise the utility object 19 | 20 | :param password: password for encryption 21 | :type password: str or None 22 | :param timeout: communication timeout 23 | :type timeout: float 24 | """ 25 | 26 | def executeCommand(self, node, command): 27 | """ 28 | Executes command on the given node. 29 | 30 | :param node: where to execute the command 31 | :type node: Node or str 32 | :param command: the command which should be sent 33 | :type command: list 34 | :returns: result 35 | :rtype: any object 36 | :raises: UtilityException in case of error 37 | """ 38 | 39 | 40 | class TcpUtility(Utility): 41 | 42 | def __init__(self, password=None, timeout=900.0): 43 | self.__timeout = timeout 44 | self.__poller = createPoller('auto') 45 | self.__connection = TcpConnection(self.__poller, 46 | onDisconnected=self.__onDisconnected, 47 | onMessageReceived=self.__onMessageReceived, 48 | onConnected=self.__onConnected, 49 | timeout=timeout) 50 | if password is not None: 51 | self.__connection.encryptor = getEncryptor(password) 52 | 53 | self.__result = None 54 | self.__error = None 55 | 56 | def executeCommand(self, node, command): 57 | self.__result = None 58 | self.__error = None 59 | 60 | if not isinstance(node, Node): 61 | try: 62 | node = TCPNode(node) 63 | except Exception: 64 | self.__error = 'invalid address to connect' 65 | return 66 | 67 | self.__isConnected = self.__connection.connect(node.ip, node.port) 68 | if not self.__isConnected: 69 | self.__error = "can't connected" 70 | return 71 | 72 | deadline = time.time() + self.__timeout 73 | 74 | self.__data = command 75 | while self.__isConnected: 76 | self.__poller.poll(0.5) 77 | if time.time() > deadline: 78 | self.__connection.disconnect() 79 | 80 | if self.__result is None: 81 | raise UtilityException(self.__error) 82 | 83 | return self.__result 84 | 85 | def __onMessageReceived(self, message): 86 | if self.__connection.encryptor and not self.__connection.sendRandKey: 87 | self.__connection.sendRandKey = message 88 | self.__connection.send(self.__data) 89 | return 90 | 91 | self.__result = message 92 | 93 | self.__connection.disconnect() 94 | 95 | def __onDisconnected(self): 96 | self.__isConnected = False 97 | if self.__result is None: 98 | self.__error = 'connection lost' 99 | 100 | def __onConnected(self): 101 | if self.__connection.encryptor: 102 | self.__connection.recvRandKey = os.urandom(32) 103 | self.__connection.send(self.__connection.recvRandKey) 104 | return 105 | 106 | self.__connection.send(self.__data) 107 | -------------------------------------------------------------------------------- /pysyncobj/version.py: -------------------------------------------------------------------------------- 1 | VERSION = '0.3.14' 2 | -------------------------------------------------------------------------------- /pysyncobj/win_inet_pton.py: -------------------------------------------------------------------------------- 1 | # This software released into the public domain. Anyone is free to copy, 2 | # modify, publish, use, compile, sell, or distribute this software, 3 | # either in source code form or as a compiled binary, for any purpose, 4 | # commercial or non-commercial, and by any means. 5 | 6 | import socket 7 | import ctypes 8 | import os 9 | 10 | 11 | class sockaddr(ctypes.Structure): 12 | _fields_ = [("sa_family", ctypes.c_short), 13 | ("__pad1", ctypes.c_ushort), 14 | ("ipv4_addr", ctypes.c_byte * 4), 15 | ("ipv6_addr", ctypes.c_byte * 16), 16 | ("__pad2", ctypes.c_ulong)] 17 | 18 | if hasattr(ctypes, 'windll'): 19 | WSAStringToAddressA = ctypes.windll.ws2_32.WSAStringToAddressA 20 | WSAAddressToStringA = ctypes.windll.ws2_32.WSAAddressToStringA 21 | else: 22 | def not_windows(): 23 | raise SystemError( 24 | "Invalid platform. ctypes.windll must be available." 25 | ) 26 | WSAStringToAddressA = not_windows 27 | WSAAddressToStringA = not_windows 28 | 29 | 30 | def inet_pton(address_family, ip_string): 31 | addr = sockaddr() 32 | addr.sa_family = address_family 33 | addr_size = ctypes.c_int(ctypes.sizeof(addr)) 34 | 35 | if WSAStringToAddressA( 36 | ip_string, 37 | address_family, 38 | None, 39 | ctypes.byref(addr), 40 | ctypes.byref(addr_size) 41 | ) != 0: 42 | raise socket.error(ctypes.FormatError()) 43 | 44 | if address_family == socket.AF_INET: 45 | return ctypes.string_at(addr.ipv4_addr, 4) 46 | if address_family == socket.AF_INET6: 47 | return ctypes.string_at(addr.ipv6_addr, 16) 48 | 49 | raise socket.error('unknown address family') 50 | 51 | 52 | def inet_ntop(address_family, packed_ip): 53 | addr = sockaddr() 54 | addr.sa_family = address_family 55 | addr_size = ctypes.c_int(ctypes.sizeof(addr)) 56 | ip_string = ctypes.create_string_buffer(128) 57 | ip_string_size = ctypes.c_int(ctypes.sizeof(ip_string)) 58 | 59 | if address_family == socket.AF_INET: 60 | if len(packed_ip) != ctypes.sizeof(addr.ipv4_addr): 61 | raise socket.error('packed IP wrong length for inet_ntoa') 62 | ctypes.memmove(addr.ipv4_addr, packed_ip, 4) 63 | elif address_family == socket.AF_INET6: 64 | if len(packed_ip) != ctypes.sizeof(addr.ipv6_addr): 65 | raise socket.error('packed IP wrong length for inet_ntoa') 66 | ctypes.memmove(addr.ipv6_addr, packed_ip, 16) 67 | else: 68 | raise socket.error('unknown address family') 69 | 70 | if WSAAddressToStringA( 71 | ctypes.byref(addr), 72 | addr_size, 73 | None, 74 | ip_string, 75 | ctypes.byref(ip_string_size) 76 | ) != 0: 77 | raise socket.error(ctypes.FormatError()) 78 | 79 | return ip_string[:ip_string_size.value - 1] 80 | 81 | # Adding our two functions to the socket library 82 | if os.name == 'nt': 83 | socket.inet_pton = inet_pton 84 | socket.inet_ntop = inet_ntop 85 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = README.md 3 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | from pysyncobj.version import VERSION 3 | 4 | description='A library for replicating your python class between multiple servers, based on raft protocol' 5 | try: 6 | import pypandoc 7 | long_description = pypandoc.convert('README.md', 'rst') 8 | except(IOError, ImportError, RuntimeError): 9 | long_description = description 10 | 11 | setup( 12 | name='pysyncobj', 13 | packages=['pysyncobj'], 14 | version=VERSION, 15 | description=description, 16 | long_description=long_description, 17 | author='Filipp Ozinov', 18 | author_email='fippo@mail.ru', 19 | license='MIT', 20 | url='https://github.com/bakwc/PySyncObj', 21 | download_url='https://github.com/bakwc/PySyncObj/tarball/' + VERSION, 22 | keywords=['network', 'replication', 'raft', 'synchronization'], 23 | classifiers=[ 24 | 'Topic :: System :: Networking', 25 | 'Topic :: System :: Distributed Computing', 26 | 'Intended Audience :: Developers', 27 | 'Programming Language :: Python :: 2.7', 28 | 'Programming Language :: Python :: 3.4', 29 | 'Programming Language :: Python :: 3.5', 30 | 'Operating System :: POSIX :: Linux', 31 | 'Operating System :: MacOS :: MacOS X', 32 | 'License :: OSI Approved :: MIT License', 33 | ], 34 | entry_points={ 35 | 'console_scripts': [ 36 | 'syncobj_admin=pysyncobj.syncobj_admin:main', 37 | ], 38 | }, 39 | ) 40 | -------------------------------------------------------------------------------- /syncobj_admin.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | from pysyncobj.syncobj_admin import main 4 | 5 | if __name__ == '__main__': 6 | main() 7 | -------------------------------------------------------------------------------- /test_zerodowntime/README.md: -------------------------------------------------------------------------------- 1 | test.py is a script to test zero-downtime upgrades between two versions of the code. Use `python3 test.py -h` to see its options. 2 | 3 | The basic operation is that test.py spawns a cluster on the local machine. This cluster is simply a distributed counter. test.py then sends increment commands to the cluster processes in a random (but controllable) way. One after the other, it also takes a process down, upgrades its code, and restarts it again a bit later. The other processes continue working, i.e. the cluster should still be functional ("zero downtime"). At the end, the Raft logs and counter values from all processes are compared to check that everything was working correctly. 4 | 5 | proc.py is the script executed by the individual processes spawned by test.py. It takes commands via stdin and replies via stdout. 6 | -------------------------------------------------------------------------------- /test_zerodowntime/proc.py: -------------------------------------------------------------------------------- 1 | import pysyncobj 2 | import pysyncobj.testrevision 3 | import sys 4 | import time 5 | 6 | class MyCounter(pysyncobj.SyncObj): 7 | def __init__(self, selfAddr, otherAddrs, **kwargs): 8 | super(MyCounter, self).__init__(selfAddr, otherAddrs, **kwargs) 9 | self._counter = 0 10 | 11 | @pysyncobj.replicated 12 | def incCounter(self): 13 | self._counter += 1 14 | 15 | def getCounter(self): 16 | return self._counter 17 | 18 | 19 | def main(argv = sys.argv[1:]):#, stdin = sys.stdin): 20 | selfAddr = argv[0] 21 | otherAddrs = argv[1:] 22 | conf = pysyncobj.SyncObjConf() 23 | conf.journalFile = './journal' 24 | conf.fullDumpFile = './dump' 25 | counter = MyCounter(selfAddr, otherAddrs, conf = conf) 26 | 27 | print('{} ready at {}'.format(selfAddr, pysyncobj.testrevision.rev), file = sys.stderr) 28 | 29 | while True: 30 | line = sys.stdin.readline().strip() 31 | 32 | if line == 'wait': 33 | time.sleep(2) 34 | print('waited', flush = True) 35 | elif line == 'increment': 36 | while True: 37 | try: 38 | counter.incCounter(sync = True) 39 | except pysyncobj.SyncObjException as e: 40 | print('{} increment yielded SyncObjException with error code {}, retrying'.format(selfAddr, e.errorCode), file = sys.stderr) 41 | else: 42 | break 43 | print('incremented', flush = True) 44 | elif line == 'print': 45 | print(counter.getCounter(), flush = True) 46 | elif line == 'printlog': 47 | print(repr(counter._SyncObj__raftLog[:]).replace('\n', ' '), flush = True) 48 | elif line == 'quit' or line == '': 49 | break 50 | else: 51 | print('Got unknown command: {}'.format(line), file = sys.stderr) 52 | 53 | 54 | if __name__ == '__main__': 55 | main() 56 | -------------------------------------------------------------------------------- /test_zerodowntime/test.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import contextlib 3 | import os 4 | import random 5 | import shutil 6 | import subprocess 7 | import sys 8 | import tempfile 9 | import time 10 | 11 | 12 | # Change directory context manager from https://stackoverflow.com/a/24176022 13 | @contextlib.contextmanager 14 | def cd(newdir): 15 | prevdir = os.getcwd() 16 | os.chdir(os.path.expanduser(newdir)) 17 | try: 18 | yield 19 | finally: 20 | os.chdir(prevdir) 21 | 22 | 23 | # Parse arguments 24 | parser = argparse.ArgumentParser(formatter_class = argparse.ArgumentDefaultsHelpFormatter) 25 | parser.add_argument('revA', help = 'path or git revision for the "old" version. When it is a path, it must be the directory containing the pysyncobj package. When it is a git revision, the parent directory of the directory containing this script must be the git repository, and this repository must contain the revision (i.e. run this script from within the repository).') 26 | parser.add_argument('revB', help = 'path or git revision for the "new" version') 27 | parser.add_argument('cycles', nargs = '?', type = int, default = 120, help = 'Number of cycles to run; must be at least ten times the number of processes') 28 | parser.add_argument('processes', nargs = '?', type = int, default = 10, help = 'Number of parallel processes; must be at least 3') 29 | parser.add_argument('seed', nargs = '?', type = int, default = None, help = 'Seed for PRNG. Using the same seed value produces the exact same order of operations *in this test script*, i.e. outside of PySyncObj. Everything inside the cluster, e.g. which node is elected leader and when, is essentially still completely random.') 30 | args = parser.parse_args() 31 | 32 | if args.processes < 3: 33 | print('Testing with less than 3 processes makes no sense', file = sys.stderr) 34 | sys.exit(1) 35 | 36 | if args.cycles < args.processes * 10: 37 | print('Needs at least ten times as many cycles as there are processes to get useful results', file = sys.stderr) 38 | sys.exit(1) 39 | 40 | workingDir = os.path.abspath(os.path.dirname(__file__)) 41 | 42 | # Seed 43 | seed = args.seed 44 | if seed is None: 45 | seed = random.randint(0, 2**32 - 1) 46 | print('Seed: {}'.format(seed)) 47 | random.seed(seed) 48 | 49 | # Generate command to be executed at each cycle 50 | commands = [] # list of tuples (proc index, command) 51 | # Commands: 52 | # 'increment' -- send an increment command to the process, wait until it returns 'incremented' 53 | # 'compare' -- compare the value across all processes, verify that the majority has the same, expected value; proc index is irrelevant in this case 54 | # 'upgrade' -- quit the process, upgrade the code, restart the process 55 | for i in range(args.cycles): 56 | cmd = random.choice(('increment', 'increment', 'increment', 'increment', 'compare')) # 80 % increment, 20 % compare 57 | proc = random.randrange(args.processes) 58 | commands.append((proc, cmd)) 59 | 60 | upgrades = list(range(args.processes)) 61 | random.shuffle(upgrades) 62 | # First upgrade at 20 % of the cycles, last at 80 %, equal cycle distance between 63 | # This, combined with the cycles >= 10 * processes requirement, also ensures that the upgrades don't overlap. 64 | # Each upgrade takes 3 cycles plus the startup time of the new process, which shouldn't be much worse than 1-2 cycles. 65 | # 60 % of the cycles must therefore be at least 5 times the number of processes, i.e. cycles >= 5/0.6 * processes = 8.33 * processes. 66 | for i in range(args.processes): 67 | upgradeCycle = int((0.2 + 0.6 * i / (args.processes - 1)) * args.cycles) 68 | commands[upgradeCycle] = (upgrades[i], 'upgrade') 69 | # Ensure that this process doesn't receive any increment operations while it's upgrading 70 | for j in range(upgradeCycle, upgradeCycle + 3): 71 | if commands[j][1] == 'increment': 72 | while commands[j][0] == upgrades[i]: 73 | commands[j] = (random.randrange(args.processes), 'increment') 74 | 75 | # Generate node addresses 76 | addrs = ['127.0.0.1:{}'.format(42000 + i) for i in range(args.processes)] 77 | 78 | status = 0 79 | 80 | # Set up temporary directory 81 | with tempfile.TemporaryDirectory() as tmpdirname: 82 | with cd(tmpdirname): 83 | os.mkdir('revA') 84 | os.mkdir('revB') 85 | 86 | # Check out revisions into the temporary directory 87 | for revArg, revTarget in ((args.revA, 'revA'), (args.revB, 'revB')): 88 | if os.path.isdir(os.path.join(workingDir, revArg)): 89 | # Copy directory contents to ./revTarget; I like rsync... 90 | if subprocess.call(['rsync', '-a', os.path.join(workingDir, revArg, ''), os.path.join(revTarget, '')]) != 0: 91 | print('rsync of {} failed'.format(revTarget), file = sys.stderr) 92 | sys.exit(1) 93 | else: 94 | with cd(os.path.join(workingDir, '..')): #TODO: Replace with GIT_DIR environment variable or something 95 | gitProc = subprocess.Popen(['git', 'archive', revArg], stdout = subprocess.PIPE) 96 | tarProc = subprocess.Popen(['tar', '-x', '-C', os.path.join(tmpdirname, revTarget), '--strip-components', '1', 'pysyncobj'], stdin = gitProc.stdout) 97 | gitProc.stdout.close() 98 | tarProc.communicate() 99 | if tarProc.returncode != 0: 100 | print('git or tar of {} failed'.format(revTarget), file = sys.stderr) 101 | sys.exit(1) 102 | 103 | with open(os.path.join(revTarget, 'testrevision.py'), 'w') as fp: 104 | fp.write('rev = {!r}'.format(revTarget)) 105 | 106 | # Create each process's directory and initialise it with the revision A 107 | for i in range(args.processes): 108 | os.mkdir('proc{}'.format(i)) 109 | os.mkdir(os.path.join('proc{}'.format(i), 'pysyncobj')) 110 | if subprocess.call(['rsync', '-a', os.path.join('revA', ''), os.path.join('proc{}'.format(i), 'pysyncobj', '')]) != 0: 111 | print('rsync of revA to proc{} failed'.format(i), file = sys.stderr) 112 | sys.exit(1) 113 | if subprocess.call(['rsync', '-a', os.path.join(workingDir, 'proc.py'), os.path.join('proc{}'.format(i), '')]) != 0: 114 | print('rsync of proc.py to proc{} failed'.format(i), file = sys.stderr) 115 | sys.exit(1) 116 | 117 | procs = [] 118 | 119 | try: 120 | # Launch processes 121 | for i in range(args.processes): 122 | with cd('proc{}'.format(i)): 123 | procs.append(subprocess.Popen(['python3', 'proc.py', addrs[i]] + [addrs[j] for j in range(args.processes) if j != i], stdin = subprocess.PIPE, stdout = subprocess.PIPE, bufsize = 0)) 124 | 125 | # Randomly run commands on the custer and upgrade the processes one-by-one, ensuring that everything's still fine after each step 126 | counter = 0 # The expected value of the counter 127 | restart = -1 # Variable for when to restart a process; set to 3 on the 'upgrade' command, counted down on each command, the upgraded process is restarted when it reaches zero 128 | upgradingProcId = None # The procId that is currently upgrading 129 | for procId, command in commands: 130 | if command == 'increment': 131 | assert procId != upgradingProcId, 'previous upgrade hasn''t finished' 132 | 133 | print('Sending increment to proc{}'.format(procId)) 134 | 135 | # Send command 136 | procs[procId].stdin.write(b'increment\n') 137 | procs[procId].stdin.flush() 138 | 139 | # Wait until process is done with incrementing 140 | procs[procId].stdout.readline() 141 | 142 | counter += 1 143 | elif command == 'compare': 144 | print('Comparing') 145 | 146 | # Compare the *logs* of the processes 147 | # Comparing the values of the counter doesn't work because the commands might not have been applied yet. 148 | # So if the values don't match, that doesn't mean that replication is broken. 149 | # The log reflects what's actually replicated. 150 | 151 | for i in range(args.processes): 152 | if i == upgradingProcId: 153 | continue 154 | procs[i].stdin.write(b'printlog\n') 155 | procs[i].stdin.flush() 156 | logs = [procs[i].stdout.readline().strip() if i != upgradingProcId else None for i in range(args.processes)] 157 | 158 | # Ensure that a majority of the logs are equal; note that this doesn't verify that all increments were actually replicated. 159 | 160 | ok = False 161 | for i in range((args.processes + 1) // 2): 162 | count = 1 163 | for j in range(i, args.processes): 164 | if logs[i] == logs[j]: 165 | count += 1 166 | if count >= args.processes // 2 + 1: 167 | ok = True 168 | break 169 | if not ok: 170 | print('Didn''t find at least {} matching logs'.format(args.processes // 2 + 1), file = sys.stderr) 171 | for i in range(args.processes): 172 | print('proc{} log: {}'.format(i, logs[i].decode('utf-8')), file = sys.stderr) 173 | sys.exit(1) 174 | elif command == 'upgrade': 175 | assert upgradingProcId is None, 'previous upgrade hasn''t finished' 176 | 177 | print('Taking down proc{} for upgrade'.format(procId)) 178 | 179 | # Let the process finish gracefully 180 | procs[procId].stdin.write(b'quit\n') 181 | procs[procId].stdin.flush() 182 | procs[procId].wait() 183 | 184 | # Delete revA code 185 | shutil.rmtree(os.path.join('proc{}'.format(procId), 'pysyncobj')) 186 | os.mkdir(os.path.join('proc{}'.format(procId), 'pysyncobj')) 187 | 188 | # Copy revB 189 | if subprocess.call(['rsync', '-a', os.path.join('revB', ''), os.path.join('proc{}'.format(procId), 'pysyncobj', '')]) != 0: 190 | print('rsync of revB to proc{} failed'.format(procId), file = sys.stderr) 191 | sys.exit(1) 192 | 193 | upgradingProcId = procId 194 | restart = 3 195 | 196 | restart -= 1 197 | if restart == 0: 198 | print('Restarting proc{}'.format(upgradingProcId)) 199 | with cd('proc{}'.format(upgradingProcId)): 200 | procs[upgradingProcId] = subprocess.Popen(['python3', 'proc.py', addrs[upgradingProcId]] + [addrs[j] for j in range(args.processes) if j != upgradingProcId], stdin = subprocess.PIPE, stdout = subprocess.PIPE, bufsize = 0) 201 | upgradingProcId = None 202 | 203 | print('Final comparison...') 204 | 205 | # Give the processes some time to catch up 206 | time.sleep(5) 207 | 208 | # Check that all logs are the same, and that all counter values are equal to the expected value 209 | for i in range(args.processes): 210 | procs[i].stdin.write(b'printlog\n') 211 | procs[i].stdin.flush() 212 | logs = [procs[i].stdout.readline().strip() for i in range(args.processes)] 213 | 214 | for i in range(args.processes): 215 | procs[i].stdin.write(b'print\n') 216 | procs[i].stdin.flush() 217 | counters = [int(procs[i].stdout.readline().strip()) for i in range(args.processes)] 218 | 219 | if not all(x == logs[0] for x in logs): 220 | print('ERROR: not all logs are equal', file = sys.stderr) 221 | for i in range(args.processes): 222 | print('proc{} log: {}'.format(i, logs[i].decode('utf-8')), file = sys.stderr) 223 | status = 1 224 | elif not all(x == counter for x in counters): 225 | print('ERROR: not all counters are equal to the expected value {}: {}'.format(counter, counters), file = sys.stderr) 226 | status = 1 227 | else: 228 | print('OK', file = sys.stderr) 229 | 230 | print('Sending quit command', file = sys.stderr) 231 | for i in range(args.processes): 232 | procs[i].stdin.write(b'quit\n') 233 | for i in range(args.processes): 234 | procs[i].communicate() 235 | except: 236 | print('Killing processes', file = sys.stderr) 237 | for proc in procs: 238 | proc.kill() 239 | raise 240 | 241 | sys.exit(status) 242 | --------------------------------------------------------------------------------