├── .coveragerc ├── .gitignore ├── .travis.yml ├── AUTHORS ├── CHANGES.md ├── LICENSE ├── MANIFEST.in ├── Makefile ├── README.md ├── docs ├── Makefile ├── conf.py ├── index.rst ├── make.bat ├── modules │ ├── hystrix.circuitbreaker.rst │ ├── hystrix.command.rst │ ├── hystrix.command_metrics.rst │ ├── hystrix.command_properties.rst │ ├── hystrix.event_type.rst │ ├── hystrix.group.rst │ ├── hystrix.metrics.rst │ ├── hystrix.pool.rst │ ├── hystrix.pool_metrics.rst │ ├── hystrix.rolling_number.rst │ ├── hystrix.rolling_percentile.rst │ ├── hystrix.rst │ ├── hystrix.strategy.eventnotifier.event_notifier.rst │ ├── hystrix.strategy.eventnotifier.event_notifier_default.rst │ ├── hystrix.strategy.eventnotifier.rst │ ├── hystrix.strategy.rst │ └── modules.rst └── requirements.txt ├── hystrix ├── __init__.py ├── circuitbreaker.py ├── command.py ├── command_metrics.py ├── command_properties.py ├── event_type.py ├── group.py ├── metrics.py ├── pool.py ├── pool_metrics.py ├── rolling_number.py ├── rolling_percentile.py └── strategy │ ├── __init__.py │ └── eventnotifier │ ├── __init__.py │ ├── event_notifier.py │ └── event_notifier_default.py ├── repos.sh ├── setup.cfg ├── setup.py ├── tests ├── __init__.py ├── sample_data.py ├── test_circuitbreaker.py ├── test_command.py ├── test_command_metrics.py ├── test_command_properties.py ├── test_group.py ├── test_metrics.py ├── test_pool.py ├── test_pool_metrics.py ├── test_rolling_number.py ├── test_rolling_percentile.py └── utils.py └── tox.ini /.coveragerc: -------------------------------------------------------------------------------- 1 | [report] 2 | omit = 3 | hystrix/__init__.py 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Backup files 2 | *.~ 3 | 4 | # Byte-compiled / optimized / DLL files 5 | __pycache__/ 6 | *.py[cod] 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | bin/ 13 | build/ 14 | develop-eggs/ 15 | dist/ 16 | eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | *.egg-info/ 23 | .installed.cfg 24 | *.egg 25 | .eggs 26 | MANIFEST 27 | 28 | # Installer logs 29 | pip-log.txt 30 | pip-delete-this-directory.txt 31 | 32 | # Unit test / coverage reports 33 | .tox/ 34 | .coverage 35 | .cache 36 | nosetests.xml 37 | coverage.xml 38 | .coverage* 39 | 40 | # Translations 41 | *.mo 42 | 43 | # Sphinx documentation 44 | docs/_build/ 45 | 46 | # vim 47 | *.swp 48 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | python: 4 | - "3.3" 5 | - "3.4" 6 | - "3.5" 7 | - "3.6" 8 | 9 | install: pip install tox-travis 10 | 11 | script: 12 | - tox 13 | 14 | after_success: 15 | - pip install coveralls 16 | - coveralls 17 | 18 | branches: 19 | only: 20 | - master 21 | 22 | notifications: 23 | email: false 24 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Wiliam Souza 2 | -------------------------------------------------------------------------------- /CHANGES.md: -------------------------------------------------------------------------------- 1 | ### 0.1.0 2 | 3 | * First release 4 | -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 
180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright 2014 Hystrix Python Authors. 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include AUTHORS 3 | include LICENSE 4 | include CHANGES.md 5 | exclude Makefile 6 | exclude tox.ini 7 | exclude *.sh 8 | prune docs 9 | prune tests 10 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | clean: clean-eggs clean-build 3 | @find . -iname '*.pyc' -delete 4 | @find . -iname '*.pyo' -delete 5 | @find . -iname '*~' -delete 6 | @find . -iname '*.swp' -delete 7 | @find . -iname '__pycache__' -delete 8 | 9 | clean-eggs: 10 | @find . -name '*.egg' -print0|xargs -0 rm -rf -- 11 | @rm -rf .eggs/ 12 | 13 | clean-build: 14 | @rm -fr build/ 15 | @rm -fr dist/ 16 | @rm -fr *.egg-info 17 | 18 | clean-api-doc: 19 | @rm -rf docs/modules/* 20 | 21 | api-doc: 22 | @sphinx-apidoc -e -o docs/modules/ hystrix/ 23 | 24 | test: 25 | python setup.py test 26 | 27 | release: clean 28 | git tag `python setup.py -q version` 29 | git push origin `python setup.py -q version` 30 | python setup.py sdist 31 | python setup.py bdist_wheel 32 | twine upload dist/* 33 | 34 | rst: 35 | @pandoc --from=markdown --to=rst --output=README.rst README.md 36 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | hystrix-py 2 | ========== 3 | 4 | [![Build Status](https://travis-ci.org/wiliamsouza/hystrix-py.svg) 5 | ](https://travis-ci.org/wiliamsouza/hystrix-py) 6 | [![Coverage Status](https://img.shields.io/coveralls/wiliamsouza/hystrix-py.svg)](https://coveralls.io/r/wiliamsouza/hystrix-py) 7 | [![Documentation Status](https://readthedocs.org/projects/hystrix-py/badge/?version=latest)](https://readthedocs.org/projects/hystrix-py/?badge=latest) 8 | 9 | A Netflix Hystrix port to Python. 10 | 11 | **This is a work in progress, please feel free to help!** 12 | 13 | 14 | What is Hystrix? 15 | ---------------- 16 | 17 | For more information see the [Netflix Hystrix] 18 | (https://github.com/Netflix/Hystrix/wiki) Wiki documentation. 
19 | 20 | 21 | How it works 22 | ------------ 23 | 24 | To learn more, see the How it Works section of the 25 | [Netflix Hystrix](https://github.com/Netflix/Hystrix/wiki/How-it-Works) 26 | Wiki documentation. 27 | 28 | 29 | Features 30 | -------- 31 | 32 | It is an **ALPHA** version and only supports launching a group of commands inside 33 | an executor pool. 34 | 35 | * Execute synchronous commands. 36 | * Execute asynchronous commands. 37 | * Execute asynchronous commands and attach a callback. 38 | 39 | 40 | Requirements 41 | ------------ 42 | 43 | It depends on [concurrent.futures](https://docs.python.org/3/library/concurrent.futures.html), 44 | new in Python version 3.2, and 45 | [enum](https://docs.python.org/3.4/library/enum.html), 46 | new in Python version 3.4. 47 | It uses the [futures](https://pypi.python.org/pypi/futures) and 48 | [enum34](https://pypi.python.org/pypi/enum34/) backports to run on Python 49 | versions 2.7, 3.3 and 3.4. 50 | 51 | 52 | Installation 53 | ------------ 54 | 55 | Create a virtualenv: 56 | 57 | ``` 58 | mkproject --python= hystrix-py 59 | ``` 60 | 61 | Get the code: 62 | 63 | ``` 64 | git clone https://github.com/wiliamsouza/hystrix-py . 65 | ``` 66 | 67 | Install it: 68 | 69 | ``` 70 | python setup.py develop 71 | ``` 72 | 73 | The last command installs your code in "Development Mode": it creates an 74 | `egg-link` in your virtualenv's `site-packages`, making it available 75 | on that environment's `sys.path`. For more info see 76 | [setuptools development mode](https://pythonhosted.org/setuptools/setuptools.html#development-mode). 77 | 78 | 79 | Development and test dependencies 80 | --------------------------------- 81 | 82 | `setup.py` will handle test dependencies; to install the development dependencies use: 83 | 84 | ``` 85 | pip install -e .[dev] 86 | ``` 87 | 88 | 89 | Tests 90 | ----- 91 | 92 | ``` 93 | python setup.py test 94 | ``` 95 | 96 | 97 | Hello world 98 | ----------- 99 | 100 | Code to be isolated is wrapped inside the `run()` method of a `hystrix.Command`, similar to the following: 101 | 102 | ```python 103 | from hystrix import Command 104 | 105 | class HelloWorldCommand(Command): 106 | def run(self): 107 | return 'Hello World' 108 | ``` 109 | 110 | This command could be used like this: 111 | 112 | ```python 113 | command = HelloWorldCommand() 114 | 115 | # synchronously 116 | print(command.execute()) 117 | 'Hello World' 118 | 119 | # asynchronously 120 | future = command.queue() 121 | print(future.result()) 122 | 'Hello World' 123 | 124 | # callback 125 | def print_result(future): 126 | print(future.result()) 127 | 128 | future = command.observe() 129 | future.add_done_callback(print_result) 130 | ``` 131 | 132 | LICENSE 133 | ------- 134 | 135 | Copyright 2015 Hystrix Python Authors. 136 | 137 | Licensed under the Apache License, Version 2.0 (the "License"); 138 | you may not use this file except in compliance with the License. 139 | You may obtain a copy of the License at 140 | 141 | http://www.apache.org/licenses/LICENSE-2.0 142 | 143 | Unless required by applicable law or agreed to in writing, software 144 | distributed under the License is distributed on an "AS IS" BASIS, 145 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 146 | See the License for the specific language governing permissions and 147 | limitations under the License.
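The Hello world command above only defines `run()`. When `run()` raises, `execute()` submits the command's `fallback()` method to the same pool and returns its result, falling back to `cache()` if the fallback fails too. The sketch below is a minimal illustration of that chain; the command name and the simulated failure inside `run()` are made up for this example, not part of the library.

```python
from hystrix import Command


class UnreliableServiceCommand(Command):
    """Hypothetical command whose run() always fails, to exercise the fallback path."""

    def run(self):
        # The risky call being isolated; raising here makes execute() try fallback().
        raise RuntimeError('remote service unavailable')

    def fallback(self):
        # Invoked by execute() after run() raises.
        return 'Hello from fallback'


command = UnreliableServiceCommand()
print(command.execute())  # prints 'Hello from fallback'
```

If `fallback()` raised as well, `execute()` would make one last attempt with `cache()` before the exception propagated to the caller.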
148 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # User-friendly check for sphinx-build 11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) 12 | $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) 13 | endif 14 | 15 | # Internal variables. 16 | PAPEROPT_a4 = -D latex_paper_size=a4 17 | PAPEROPT_letter = -D latex_paper_size=letter 18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 19 | # the i18n builder cannot share the environment and doctrees with the others 20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 21 | 22 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext 23 | 24 | help: 25 | @echo "Please use \`make ' where is one of" 26 | @echo " html to make standalone HTML files" 27 | @echo " dirhtml to make HTML files named index.html in directories" 28 | @echo " singlehtml to make a single large HTML file" 29 | @echo " pickle to make pickle files" 30 | @echo " json to make JSON files" 31 | @echo " htmlhelp to make HTML files and a HTML help project" 32 | @echo " qthelp to make HTML files and a qthelp project" 33 | @echo " devhelp to make HTML files and a Devhelp project" 34 | @echo " epub to make an epub" 35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 36 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" 38 | @echo " text to make text files" 39 | @echo " man to make manual pages" 40 | @echo " texinfo to make Texinfo files" 41 | @echo " info to make Texinfo files and run them through makeinfo" 42 | @echo " gettext to make PO message catalogs" 43 | @echo " changes to make an overview of all changed/added/deprecated items" 44 | @echo " xml to make Docutils-native XML files" 45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes" 46 | @echo " linkcheck to check all external links for integrity" 47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 48 | 49 | clean: 50 | rm -rf $(BUILDDIR)/* 51 | 52 | html: 53 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 54 | @echo 55 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 56 | 57 | dirhtml: 58 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 59 | @echo 60 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 61 | 62 | singlehtml: 63 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 64 | @echo 65 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 66 | 67 | pickle: 68 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 69 | @echo 70 | @echo "Build finished; now you can process the pickle files." 
71 | 72 | json: 73 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 74 | @echo 75 | @echo "Build finished; now you can process the JSON files." 76 | 77 | htmlhelp: 78 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 79 | @echo 80 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 81 | ".hhp project file in $(BUILDDIR)/htmlhelp." 82 | 83 | qthelp: 84 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 85 | @echo 86 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 87 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 88 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/HystrixPython.qhcp" 89 | @echo "To view the help file:" 90 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/HystrixPython.qhc" 91 | 92 | devhelp: 93 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 94 | @echo 95 | @echo "Build finished." 96 | @echo "To view the help file:" 97 | @echo "# mkdir -p $$HOME/.local/share/devhelp/HystrixPython" 98 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/HystrixPython" 99 | @echo "# devhelp" 100 | 101 | epub: 102 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 103 | @echo 104 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 105 | 106 | latex: 107 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 108 | @echo 109 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 110 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 111 | "(use \`make latexpdf' here to do that automatically)." 112 | 113 | latexpdf: 114 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 115 | @echo "Running LaTeX files through pdflatex..." 116 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 117 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 118 | 119 | latexpdfja: 120 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 121 | @echo "Running LaTeX files through platex and dvipdfmx..." 122 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 123 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 124 | 125 | text: 126 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 127 | @echo 128 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 129 | 130 | man: 131 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 132 | @echo 133 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 134 | 135 | texinfo: 136 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 137 | @echo 138 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 139 | @echo "Run \`make' in that directory to run these through makeinfo" \ 140 | "(use \`make info' here to do that automatically)." 141 | 142 | info: 143 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 144 | @echo "Running Texinfo files through makeinfo..." 145 | make -C $(BUILDDIR)/texinfo info 146 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 147 | 148 | gettext: 149 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 150 | @echo 151 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 152 | 153 | changes: 154 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 155 | @echo 156 | @echo "The overview file is in $(BUILDDIR)/changes." 
157 | 158 | linkcheck: 159 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 160 | @echo 161 | @echo "Link check complete; look for any errors in the above output " \ 162 | "or in $(BUILDDIR)/linkcheck/output.txt." 163 | 164 | doctest: 165 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 166 | @echo "Testing of doctests in the sources finished, look at the " \ 167 | "results in $(BUILDDIR)/doctest/output.txt." 168 | 169 | xml: 170 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml 171 | @echo 172 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml." 173 | 174 | pseudoxml: 175 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml 176 | @echo 177 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 178 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | # 4 | # Hystrix Python documentation build configuration file, created by 5 | # sphinx-quickstart on Sat Feb 14 16:27:19 2015. 6 | # 7 | # This file is execfile()d with the current directory set to its 8 | # containing dir. 9 | # 10 | # Note that not all possible configuration values are present in this 11 | # autogenerated file. 12 | # 13 | # All configuration values have a default; values that are commented out 14 | # serve to show the default. 15 | 16 | import sys 17 | import os 18 | 19 | import sphinx_rtd_theme 20 | 21 | 22 | # If extensions (or modules to document with autodoc) are in another directory, 23 | # add these directories to sys.path here. If the directory is relative to the 24 | # documentation root, use os.path.abspath to make it absolute, like shown here. 25 | sys.path.insert(0, os.path.abspath('.')) 26 | sys.path.insert(0, os.path.abspath('..')) 27 | 28 | # -- General configuration ------------------------------------------------ 29 | 30 | # If your documentation needs a minimal Sphinx version, state it here. 31 | # needs_sphinx = '1.0' 32 | 33 | # Add any Sphinx extension module names here, as strings. They can be 34 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 35 | # ones. 36 | extensions = [ 37 | 'sphinx.ext.autodoc', 38 | 'sphinx.ext.coverage', 39 | 'sphinx.ext.intersphinx', 40 | 'sphinxcontrib.napoleon', 41 | ] 42 | 43 | intersphinx_mapping = {'python': ('http://docs.python.org/3.4', None)} 44 | autodoc_member_order = 'bysource' 45 | 46 | 47 | # Add any paths that contain templates here, relative to this directory. 48 | templates_path = ['_templates'] 49 | 50 | # The suffix of source filenames. 51 | source_suffix = '.rst' 52 | 53 | # The encoding of source files. 54 | # source_encoding = 'utf-8-sig' 55 | 56 | # The master toctree document. 57 | master_doc = 'index' 58 | 59 | # General information about the project. 60 | project = 'Hystrix Python' 61 | copyright = '2015, Hystrix Python Authors' 62 | 63 | # The version info for the project you're documenting, acts as replacement for 64 | # |version| and |release|, also used in various other places throughout the 65 | # built documents. 66 | # 67 | # The short X.Y version. 68 | version = '0.1.0' 69 | # The full version, including alpha/beta/rc tags. 70 | release = '0.1.0' 71 | 72 | # The language for content autogenerated by Sphinx. Refer to documentation 73 | # for a list of supported languages. 
74 | # language = None 75 | 76 | # There are two options for replacing |today|: either, you set today to some 77 | # non-false value, then it is used: 78 | # today = '' 79 | # Else, today_fmt is used as the format for a strftime call. 80 | # today_fmt = '%B %d, %Y' 81 | 82 | # List of patterns, relative to source directory, that match files and 83 | # directories to ignore when looking for source files. 84 | exclude_patterns = ['_build'] 85 | 86 | # The reST default role (used for this markup: `text`) to use for all 87 | # documents. 88 | # default_role = None 89 | 90 | # If true, '()' will be appended to :func: etc. cross-reference text. 91 | # add_function_parentheses = True 92 | 93 | # If true, the current module name will be prepended to all description 94 | # unit titles (such as .. function::). 95 | # add_module_names = True 96 | 97 | # If true, sectionauthor and moduleauthor directives will be shown in the 98 | # output. They are ignored by default. 99 | # show_authors = False 100 | 101 | # The name of the Pygments (syntax highlighting) style to use. 102 | pygments_style = 'sphinx' 103 | 104 | # A list of ignored prefixes for module index sorting. 105 | # modindex_common_prefix = [] 106 | 107 | # If true, keep warnings as "system message" paragraphs in the built documents. 108 | # keep_warnings = False 109 | 110 | 111 | # -- Options for HTML output ---------------------------------------------- 112 | 113 | # The theme to use for HTML and HTML Help pages. See the documentation for 114 | # a list of builtin themes. 115 | html_theme = 'sphinx_rtd_theme' 116 | 117 | # Theme options are theme-specific and customize the look and feel of a theme 118 | # further. For a list of options available for each theme, see the 119 | # documentation. 120 | # html_theme_options = {} 121 | 122 | # Add any paths that contain custom themes here, relative to this directory. 123 | html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 124 | 125 | # The name for this set of Sphinx documents. If None, it defaults to 126 | # " v documentation". 127 | # html_title = None 128 | 129 | # A shorter title for the navigation bar. Default is the same as html_title. 130 | # html_short_title = None 131 | 132 | # The name of an image file (relative to this directory) to place at the top 133 | # of the sidebar. 134 | # html_logo = None 135 | 136 | # The name of an image file (within the static path) to use as favicon of the 137 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 138 | # pixels large. 139 | # html_favicon = None 140 | 141 | # Add any paths that contain custom static files (such as style sheets) here, 142 | # relative to this directory. They are copied after the builtin static files, 143 | # so a file named "default.css" will overwrite the builtin "default.css". 144 | html_static_path = ['_static'] 145 | 146 | # Add any extra paths that contain custom files (such as robots.txt or 147 | # .htaccess) here, relative to this directory. These files are copied 148 | # directly to the root of the documentation. 149 | # html_extra_path = [] 150 | 151 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 152 | # using the given strftime format. 153 | # html_last_updated_fmt = '%b %d, %Y' 154 | 155 | # If true, SmartyPants will be used to convert quotes and dashes to 156 | # typographically correct entities. 157 | # html_use_smartypants = True 158 | 159 | # Custom sidebar templates, maps document names to template names. 
160 | # html_sidebars = {} 161 | 162 | # Additional templates that should be rendered to pages, maps page names to 163 | # template names. 164 | # html_additional_pages = {} 165 | 166 | # If false, no module index is generated. 167 | # html_domain_indices = True 168 | 169 | # If false, no index is generated. 170 | # html_use_index = True 171 | 172 | 173 | # If true, the index is split into individual pages for each letter. 174 | # html_split_index = False 175 | 176 | # If true, links to the reST sources are added to the pages. 177 | # html_show_sourcelink = True 178 | 179 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 180 | # html_show_sphinx = True 181 | 182 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 183 | # html_show_copyright = True 184 | 185 | # If true, an OpenSearch description file will be output, and all pages will 186 | # contain a tag referring to it. The value of this option must be the 187 | # base URL from which the finished HTML is served. 188 | # html_use_opensearch = '' 189 | 190 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 191 | # html_file_suffix = None 192 | 193 | # Output file base name for HTML help builder. 194 | htmlhelp_basename = 'HystrixPythondoc' 195 | 196 | 197 | # -- Options for LaTeX output --------------------------------------------- 198 | 199 | latex_elements = { 200 | # The paper size ('letterpaper' or 'a4paper'). 201 | # 'papersize': 'letterpaper', 202 | 203 | # The font size ('10pt', '11pt' or '12pt'). 204 | # 'pointsize': '10pt', 205 | 206 | # Additional stuff for the LaTeX preamble. 207 | # 'preamble': '', 208 | } 209 | 210 | # Grouping the document tree into LaTeX files. List of tuples 211 | # (source start file, target name, title, 212 | # author, documentclass [howto, manual, or own class]). 213 | latex_documents = [ 214 | ('index', 'HystrixPython.tex', 'Hystrix Python Documentation', 215 | 'Hystrix Python Authors', 'manual'), 216 | ] 217 | 218 | # The name of an image file (relative to this directory) to place at the top of 219 | # the title page. 220 | #latex_logo = None 221 | 222 | # For "manual" documents, if this is true, then toplevel headings are parts, 223 | # not chapters. 224 | #latex_use_parts = False 225 | 226 | # If true, show page references after internal links. 227 | #latex_show_pagerefs = False 228 | 229 | # If true, show URL addresses after external links. 230 | #latex_show_urls = False 231 | 232 | # Documents to append as an appendix to all manuals. 233 | #latex_appendices = [] 234 | 235 | # If false, no module index is generated. 236 | #latex_domain_indices = True 237 | 238 | 239 | # -- Options for manual page output --------------------------------------- 240 | 241 | # One entry per manual page. List of tuples 242 | # (source start file, name, description, authors, manual section). 243 | man_pages = [ 244 | ('index', 'hystrixpython', 'Hystrix Python Documentation', 245 | ['Hystrix Python Authors'], 1) 246 | ] 247 | 248 | # If true, show URL addresses after external links. 249 | #man_show_urls = False 250 | 251 | 252 | # -- Options for Texinfo output ------------------------------------------- 253 | 254 | # Grouping the document tree into Texinfo files. 
List of tuples 255 | # (source start file, target name, title, author, 256 | # dir menu entry, description, category) 257 | texinfo_documents = [ 258 | ('index', 'HystrixPython', 'Hystrix Python Documentation', 259 | 'Hystrix Python Authors', 'HystrixPython', 'One line description of project.', 260 | 'Miscellaneous'), 261 | ] 262 | 263 | # Documents to append as an appendix to all manuals. 264 | #texinfo_appendices = [] 265 | 266 | # If false, no module index is generated. 267 | #texinfo_domain_indices = True 268 | 269 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 270 | #texinfo_show_urls = 'footnote' 271 | 272 | # If true, do not generate a @detailmenu in the "Top" node's menu. 273 | #texinfo_no_detailmenu = False 274 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. Hystrix Python documentation master file, created by 2 | sphinx-quickstart on Sat Feb 14 16:27:19 2015. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Welcome to Hystrix Python's documentation! 7 | ========================================== 8 | 9 | .. toctree:: 10 | :maxdepth: 1 11 | 12 | .. toctree:: 13 | :maxdepth: 9 14 | 15 | modules/modules 16 | 17 | Indices and tables 18 | ================== 19 | 20 | * :ref:`genindex` 21 | * :ref:`modindex` 22 | * :ref:`search` 23 | 24 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | REM Command file for Sphinx documentation 4 | 5 | if "%SPHINXBUILD%" == "" ( 6 | set SPHINXBUILD=sphinx-build 7 | ) 8 | set BUILDDIR=_build 9 | set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . 10 | set I18NSPHINXOPTS=%SPHINXOPTS% . 11 | if NOT "%PAPER%" == "" ( 12 | set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% 13 | set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% 14 | ) 15 | 16 | if "%1" == "" goto help 17 | 18 | if "%1" == "help" ( 19 | :help 20 | echo.Please use `make ^` where ^ is one of 21 | echo. html to make standalone HTML files 22 | echo. dirhtml to make HTML files named index.html in directories 23 | echo. singlehtml to make a single large HTML file 24 | echo. pickle to make pickle files 25 | echo. json to make JSON files 26 | echo. htmlhelp to make HTML files and a HTML help project 27 | echo. qthelp to make HTML files and a qthelp project 28 | echo. devhelp to make HTML files and a Devhelp project 29 | echo. epub to make an epub 30 | echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter 31 | echo. text to make text files 32 | echo. man to make manual pages 33 | echo. texinfo to make Texinfo files 34 | echo. gettext to make PO message catalogs 35 | echo. changes to make an overview over all changed/added/deprecated items 36 | echo. xml to make Docutils-native XML files 37 | echo. pseudoxml to make pseudoxml-XML files for display purposes 38 | echo. linkcheck to check all external links for integrity 39 | echo. doctest to run all doctests embedded in the documentation if enabled 40 | goto end 41 | ) 42 | 43 | if "%1" == "clean" ( 44 | for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i 45 | del /q /s %BUILDDIR%\* 46 | goto end 47 | ) 48 | 49 | 50 | %SPHINXBUILD% 2> nul 51 | if errorlevel 9009 ( 52 | echo. 53 | echo.The 'sphinx-build' command was not found. 
Make sure you have Sphinx 54 | echo.installed, then set the SPHINXBUILD environment variable to point 55 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 56 | echo.may add the Sphinx directory to PATH. 57 | echo. 58 | echo.If you don't have Sphinx installed, grab it from 59 | echo.http://sphinx-doc.org/ 60 | exit /b 1 61 | ) 62 | 63 | if "%1" == "html" ( 64 | %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html 65 | if errorlevel 1 exit /b 1 66 | echo. 67 | echo.Build finished. The HTML pages are in %BUILDDIR%/html. 68 | goto end 69 | ) 70 | 71 | if "%1" == "dirhtml" ( 72 | %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml 73 | if errorlevel 1 exit /b 1 74 | echo. 75 | echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 76 | goto end 77 | ) 78 | 79 | if "%1" == "singlehtml" ( 80 | %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml 81 | if errorlevel 1 exit /b 1 82 | echo. 83 | echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. 84 | goto end 85 | ) 86 | 87 | if "%1" == "pickle" ( 88 | %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle 89 | if errorlevel 1 exit /b 1 90 | echo. 91 | echo.Build finished; now you can process the pickle files. 92 | goto end 93 | ) 94 | 95 | if "%1" == "json" ( 96 | %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json 97 | if errorlevel 1 exit /b 1 98 | echo. 99 | echo.Build finished; now you can process the JSON files. 100 | goto end 101 | ) 102 | 103 | if "%1" == "htmlhelp" ( 104 | %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp 105 | if errorlevel 1 exit /b 1 106 | echo. 107 | echo.Build finished; now you can run HTML Help Workshop with the ^ 108 | .hhp project file in %BUILDDIR%/htmlhelp. 109 | goto end 110 | ) 111 | 112 | if "%1" == "qthelp" ( 113 | %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp 114 | if errorlevel 1 exit /b 1 115 | echo. 116 | echo.Build finished; now you can run "qcollectiongenerator" with the ^ 117 | .qhcp project file in %BUILDDIR%/qthelp, like this: 118 | echo.^> qcollectiongenerator %BUILDDIR%\qthelp\HystrixPython.qhcp 119 | echo.To view the help file: 120 | echo.^> assistant -collectionFile %BUILDDIR%\qthelp\HystrixPython.ghc 121 | goto end 122 | ) 123 | 124 | if "%1" == "devhelp" ( 125 | %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp 126 | if errorlevel 1 exit /b 1 127 | echo. 128 | echo.Build finished. 129 | goto end 130 | ) 131 | 132 | if "%1" == "epub" ( 133 | %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub 134 | if errorlevel 1 exit /b 1 135 | echo. 136 | echo.Build finished. The epub file is in %BUILDDIR%/epub. 137 | goto end 138 | ) 139 | 140 | if "%1" == "latex" ( 141 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 142 | if errorlevel 1 exit /b 1 143 | echo. 144 | echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. 145 | goto end 146 | ) 147 | 148 | if "%1" == "latexpdf" ( 149 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 150 | cd %BUILDDIR%/latex 151 | make all-pdf 152 | cd %BUILDDIR%/.. 153 | echo. 154 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 155 | goto end 156 | ) 157 | 158 | if "%1" == "latexpdfja" ( 159 | %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex 160 | cd %BUILDDIR%/latex 161 | make all-pdf-ja 162 | cd %BUILDDIR%/.. 163 | echo. 164 | echo.Build finished; the PDF files are in %BUILDDIR%/latex. 
165 | goto end 166 | ) 167 | 168 | if "%1" == "text" ( 169 | %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text 170 | if errorlevel 1 exit /b 1 171 | echo. 172 | echo.Build finished. The text files are in %BUILDDIR%/text. 173 | goto end 174 | ) 175 | 176 | if "%1" == "man" ( 177 | %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man 178 | if errorlevel 1 exit /b 1 179 | echo. 180 | echo.Build finished. The manual pages are in %BUILDDIR%/man. 181 | goto end 182 | ) 183 | 184 | if "%1" == "texinfo" ( 185 | %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo 186 | if errorlevel 1 exit /b 1 187 | echo. 188 | echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. 189 | goto end 190 | ) 191 | 192 | if "%1" == "gettext" ( 193 | %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale 194 | if errorlevel 1 exit /b 1 195 | echo. 196 | echo.Build finished. The message catalogs are in %BUILDDIR%/locale. 197 | goto end 198 | ) 199 | 200 | if "%1" == "changes" ( 201 | %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes 202 | if errorlevel 1 exit /b 1 203 | echo. 204 | echo.The overview file is in %BUILDDIR%/changes. 205 | goto end 206 | ) 207 | 208 | if "%1" == "linkcheck" ( 209 | %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck 210 | if errorlevel 1 exit /b 1 211 | echo. 212 | echo.Link check complete; look for any errors in the above output ^ 213 | or in %BUILDDIR%/linkcheck/output.txt. 214 | goto end 215 | ) 216 | 217 | if "%1" == "doctest" ( 218 | %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest 219 | if errorlevel 1 exit /b 1 220 | echo. 221 | echo.Testing of doctests in the sources finished, look at the ^ 222 | results in %BUILDDIR%/doctest/output.txt. 223 | goto end 224 | ) 225 | 226 | if "%1" == "xml" ( 227 | %SPHINXBUILD% -b xml %ALLSPHINXOPTS% %BUILDDIR%/xml 228 | if errorlevel 1 exit /b 1 229 | echo. 230 | echo.Build finished. The XML files are in %BUILDDIR%/xml. 231 | goto end 232 | ) 233 | 234 | if "%1" == "pseudoxml" ( 235 | %SPHINXBUILD% -b pseudoxml %ALLSPHINXOPTS% %BUILDDIR%/pseudoxml 236 | if errorlevel 1 exit /b 1 237 | echo. 238 | echo.Build finished. The pseudo-XML files are in %BUILDDIR%/pseudoxml. 239 | goto end 240 | ) 241 | 242 | :end 243 | -------------------------------------------------------------------------------- /docs/modules/hystrix.circuitbreaker.rst: -------------------------------------------------------------------------------- 1 | hystrix.circuitbreaker module 2 | ============================= 3 | 4 | .. automodule:: hystrix.circuitbreaker 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/hystrix.command.rst: -------------------------------------------------------------------------------- 1 | hystrix.command module 2 | ====================== 3 | 4 | .. automodule:: hystrix.command 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/hystrix.command_metrics.rst: -------------------------------------------------------------------------------- 1 | hystrix.command_metrics module 2 | ============================== 3 | 4 | .. 
automodule:: hystrix.command_metrics 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/hystrix.command_properties.rst: -------------------------------------------------------------------------------- 1 | hystrix.command_properties module 2 | ================================= 3 | 4 | .. automodule:: hystrix.command_properties 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/hystrix.event_type.rst: -------------------------------------------------------------------------------- 1 | hystrix.event_type module 2 | ========================= 3 | 4 | .. automodule:: hystrix.event_type 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/hystrix.group.rst: -------------------------------------------------------------------------------- 1 | hystrix.group module 2 | ==================== 3 | 4 | .. automodule:: hystrix.group 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/hystrix.metrics.rst: -------------------------------------------------------------------------------- 1 | hystrix.metrics module 2 | ====================== 3 | 4 | .. automodule:: hystrix.metrics 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/hystrix.pool.rst: -------------------------------------------------------------------------------- 1 | hystrix.pool module 2 | ======================= 3 | 4 | .. automodule:: hystrix.pool 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/hystrix.pool_metrics.rst: -------------------------------------------------------------------------------- 1 | hystrix.pool_metrics module 2 | =============================== 3 | 4 | .. automodule:: hystrix.pool_metrics 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/hystrix.rolling_number.rst: -------------------------------------------------------------------------------- 1 | hystrix.rolling_number module 2 | ============================= 3 | 4 | .. automodule:: hystrix.rolling_number 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/hystrix.rolling_percentile.rst: -------------------------------------------------------------------------------- 1 | hystrix.rolling_percentile module 2 | ================================= 3 | 4 | .. automodule:: hystrix.rolling_percentile 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/hystrix.rst: -------------------------------------------------------------------------------- 1 | hystrix package 2 | =============== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | hystrix.strategy 10 | 11 | Submodules 12 | ---------- 13 | 14 | .. 
toctree:: 15 | 16 | hystrix.circuitbreaker 17 | hystrix.command 18 | hystrix.command_metrics 19 | hystrix.command_properties 20 | hystrix.event_type 21 | hystrix.pool 22 | hystrix.pool_metrics 23 | hystrix.group 24 | hystrix.metrics 25 | hystrix.rolling_number 26 | hystrix.rolling_percentile 27 | 28 | Module contents 29 | --------------- 30 | 31 | .. automodule:: hystrix 32 | :members: 33 | :undoc-members: 34 | :show-inheritance: 35 | -------------------------------------------------------------------------------- /docs/modules/hystrix.strategy.eventnotifier.event_notifier.rst: -------------------------------------------------------------------------------- 1 | hystrix.strategy.eventnotifier.event_notifier module 2 | ==================================================== 3 | 4 | .. automodule:: hystrix.strategy.eventnotifier.event_notifier 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/hystrix.strategy.eventnotifier.event_notifier_default.rst: -------------------------------------------------------------------------------- 1 | hystrix.strategy.eventnotifier.event_notifier_default module 2 | ============================================================ 3 | 4 | .. automodule:: hystrix.strategy.eventnotifier.event_notifier_default 5 | :members: 6 | :undoc-members: 7 | :show-inheritance: 8 | -------------------------------------------------------------------------------- /docs/modules/hystrix.strategy.eventnotifier.rst: -------------------------------------------------------------------------------- 1 | hystrix.strategy.eventnotifier package 2 | ====================================== 3 | 4 | Submodules 5 | ---------- 6 | 7 | .. toctree:: 8 | 9 | hystrix.strategy.eventnotifier.event_notifier 10 | hystrix.strategy.eventnotifier.event_notifier_default 11 | 12 | Module contents 13 | --------------- 14 | 15 | .. automodule:: hystrix.strategy.eventnotifier 16 | :members: 17 | :undoc-members: 18 | :show-inheritance: 19 | -------------------------------------------------------------------------------- /docs/modules/hystrix.strategy.rst: -------------------------------------------------------------------------------- 1 | hystrix.strategy package 2 | ======================== 3 | 4 | Subpackages 5 | ----------- 6 | 7 | .. toctree:: 8 | 9 | hystrix.strategy.eventnotifier 10 | 11 | Module contents 12 | --------------- 13 | 14 | .. automodule:: hystrix.strategy 15 | :members: 16 | :undoc-members: 17 | :show-inheritance: 18 | -------------------------------------------------------------------------------- /docs/modules/modules.rst: -------------------------------------------------------------------------------- 1 | hystrix 2 | ======= 3 | 4 | .. 
toctree:: 5 | :maxdepth: 4 6 | 7 | hystrix 8 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx 2 | sphinx_rtd_theme 3 | sphinxcontrib-napoleon 4 | -------------------------------------------------------------------------------- /hystrix/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import absolute_import, unicode_literals 3 | import logging 4 | 5 | from .command_metrics import CommandMetrics 6 | from .pool_metrics import PoolMetrics 7 | from .pool import Pool 8 | from .command import Command 9 | from .group import Group 10 | 11 | 12 | try: # Python 2.7+ 13 | from logging import NullHandler 14 | except ImportError: 15 | class NullHandler(logging.Handler): 16 | 17 | def emit(self, record): 18 | pass 19 | 20 | logging.getLogger('hystrix').addHandler(NullHandler()) 21 | -------------------------------------------------------------------------------- /hystrix/circuitbreaker.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import logging 3 | 4 | import six 5 | 6 | log = logging.getLogger(__name__) 7 | 8 | 9 | class CircuitBreakerMetaclass(type): 10 | 11 | __instances__ = dict() 12 | __blacklist__ = ('CircuitBreaker', 'CircuitBreakerMetaclass') 13 | 14 | def __new__(cls, name, bases, attrs): 15 | if name in cls.__blacklist__: 16 | return super(CircuitBreakerMetaclass, cls).__new__(cls, name, 17 | bases, attrs) 18 | 19 | class_name = attrs.get('__circuit_breaker_name__', 20 | '{}CircuitBreaker'.format(name)) 21 | new_class = super(CircuitBreakerMetaclass, cls).__new__(cls, 22 | class_name, 23 | bases, attrs) 24 | 25 | setattr(new_class, 'circuit_breaker_name', class_name) 26 | 27 | if class_name not in cls.__instances__: 28 | cls.__instances__[class_name] = new_class 29 | 30 | return cls.__instances__[class_name] 31 | 32 | 33 | class CircuitBreaker(six.with_metaclass(CircuitBreakerMetaclass, object)): 34 | 35 | __circuit_breaker_name__ = None 36 | -------------------------------------------------------------------------------- /hystrix/command.py: -------------------------------------------------------------------------------- 1 | """ 2 | Used to wrap code that will execute potentially risky functionality 3 | (typically meaning a service call over the network) with fault and latency 4 | tolerance, statistics and performance metrics capture, circuit breaker and 5 | bulkhead functionality. 6 | """ 7 | from __future__ import absolute_import 8 | import logging 9 | 10 | import six 11 | 12 | from hystrix.group import Group 13 | from hystrix.command_metrics import CommandMetrics 14 | from hystrix.command_properties import CommandProperties 15 | 16 | log = logging.getLogger(__name__) 17 | 18 | 19 | # TODO: Change this to an AbstractCommandMetaclass 20 | class CommandMetaclass(type): 21 | 22 | __blacklist__ = ('Command', 'CommandMetaclass') 23 | 24 | def __new__(cls, name, bases, attrs): 25 | # Command key initialization 26 | command_key = attrs.get('command_key') or name 27 | new_class = type.__new__(cls, command_key, bases, attrs) 28 | 29 | if name in cls.__blacklist__: 30 | return new_class 31 | 32 | # TODO: Check instance CommandProperties here? 
33 | command_properties_defaults = attrs.get('command_properties_defaults') 34 | if command_properties_defaults is None: 35 | command_properties_defaults = CommandProperties.setter() 36 | 37 | # Properties initialization 38 | properties_strategy = attrs.get('properties_strategy') 39 | if properties_strategy is None: 40 | properties_strategy = CommandProperties( 41 | command_key, command_properties_defaults) 42 | 43 | setattr(new_class, 'properties', properties_strategy) 44 | 45 | # Pool key 46 | # This defines which pool this command should run on. 47 | # It uses the pool_key if provided, then defaults to use Group key. 48 | # It can then be overridden by a property if defined so it can be 49 | # changed at runtime. 50 | pool_key = attrs.get('pool_key') 51 | 52 | # Group key initialization 53 | group_key = attrs.get('group_key') or '{}Group'.format(command_key) 54 | NewGroup = type(group_key, (Group,), 55 | dict(group_key=group_key, pool_key=pool_key)) 56 | 57 | setattr(new_class, 'group', NewGroup()) 58 | setattr(new_class, 'group_key', group_key) 59 | setattr(new_class, 'command_key', command_key) 60 | 61 | # Metrics initialization 62 | command_metrics_key = '{}CommandMetrics'.format(command_key) 63 | # TODO: Check instance CommandMetrics here? 64 | metrics = attrs.get('metrics') 65 | if metrics is None: 66 | NewCommandMetrics = type( 67 | command_metrics_key, (CommandMetrics,), 68 | dict(command_metrics_key=command_metrics_key, 69 | group_key=group_key, pool_key=pool_key)) 70 | metrics = NewCommandMetrics(properties=properties_strategy) 71 | 72 | setattr(new_class, 'metrics', metrics) 73 | 74 | return new_class 75 | 76 | 77 | # TODO: Change this to inherit from an AbstractCommand 78 | class Command(six.with_metaclass(CommandMetaclass, object)): 79 | 80 | command_key = None 81 | group_key = None 82 | 83 | def __init__(self, group_key=None, command_key=None, 84 | pool_key=None, circuit_breaker=None, pool=None, 85 | command_properties_defaults=None, 86 | pool_properties_defaults=None, metrics=None, 87 | fallback_semaphore=None, execution_semaphore=None, 88 | properties_strategy=None, execution_hook=None, timeout=None): 89 | self.timeout = timeout 90 | 91 | def run(self): 92 | raise NotImplementedError('Subclasses must implement this method.') 93 | 94 | def fallback(self): 95 | raise NotImplementedError('Subclasses must implement this method.') 96 | 97 | def cache(self): 98 | raise NotImplementedError('Subclasses must implement this method.') 99 | 100 | def execute(self, timeout=None): 101 | timeout = timeout or self.timeout 102 | future = self.group.pool.submit(self.run) 103 | try: 104 | return future.result(timeout) 105 | except Exception: 106 | log.exception('exception calling run for {}'.format(self)) 107 | log.info('run raises {}'.format(future.exception)) 108 | try: 109 | log.info('trying fallback for {}'.format(self)) 110 | future = self.group.pool.submit(self.fallback) 111 | return future.result(timeout) 112 | except Exception: 113 | log.exception('exception calling fallback for {}'.format(self)) 114 | log.info('run() raised {}'.format(future.exception)) 115 | log.info('trying cache for {}'.format(self)) 116 | future = self.group.pool.submit(self.cache) 117 | return future.result(timeout) 118 | 119 | def observe(self, timeout=None): 120 | timeout = timeout or self.timeout 121 | return self.__async(timeout=timeout) 122 | 123 | def queue(self, timeout=None): 124 | timeout = timeout or self.timeout 125 | return self.__async(timeout=timeout) 126 | 127 | def __async(self, timeout=None): 
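        # Submit run() to the group's pool and hand the future back to the
        # caller; result() is called below only to surface a failure early so
        # that fallback() and then cache() can be tried before returning.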
128 | timeout = timeout or self.timeout 129 | future = self.group.pool.submit(self.run) 130 | try: 131 | # Call result() to check for exception 132 | future.result(timeout) 133 | return future 134 | except Exception: 135 | log.exception('exception calling run for {}'.format(self)) 136 | log.info('run raised {}'.format(future.exception)) 137 | try: 138 | log.info('trying fallback for {}'.format(self)) 139 | future = self.group.pool.submit(self.fallback) 140 | # Call result() to check for exception 141 | future.result(timeout) 142 | return future 143 | except Exception: 144 | log.exception('exception calling fallback for {}'.format(self)) 145 | log.info('fallback raised {}'.format(future.exception)) 146 | log.info('trying cache for {}'.format(self)) 147 | return self.group.pool.submit(self.cache) 148 | -------------------------------------------------------------------------------- /hystrix/command_metrics.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import logging 3 | 4 | import six 5 | 6 | from atomos.multiprocessing.atomic import AtomicLong 7 | 8 | from hystrix.metrics import Metrics 9 | from hystrix.event_type import EventType 10 | from hystrix.rolling_number import (RollingNumber, RollingNumberEvent, 11 | ActualTime) 12 | 13 | log = logging.getLogger(__name__) 14 | 15 | 16 | class CommandMetricsMetaclass(type): 17 | """ Metaclass for :class:`CommandMetrics` 18 | 19 | Return a cached or create the :class:`CommandMetrics` instance for a given 20 | :class:`hystrix.command.Command` name. 21 | 22 | This ensures only 1 :class:`CommandMetrics` instance per 23 | :class:`hystrix.command.Command` name. 24 | """ 25 | 26 | __instances__ = dict() 27 | __blacklist__ = ('CommandMetrics', 'CommandMetricsMetaclass') 28 | 29 | def __new__(cls, name, bases, attrs): 30 | 31 | # Do not use cache for black listed classes. 32 | if name in cls.__blacklist__: 33 | return super(CommandMetricsMetaclass, cls).__new__(cls, name, 34 | bases, attrs) 35 | 36 | # User defined class name or create a default. 37 | command_metrics_key = attrs.get('command_metrics_key') or \ 38 | '{}CommandMetrics'.format(name) 39 | 40 | # Check for CommandMetrics class instance 41 | if command_metrics_key not in cls.__instances__: 42 | new_class = super(CommandMetricsMetaclass, cls).__new__(cls, 43 | command_metrics_key, 44 | bases, 45 | attrs) 46 | setattr(new_class, 'command_metrics_key', command_metrics_key) 47 | cls.__instances__[command_metrics_key] = new_class 48 | 49 | return cls.__instances__[command_metrics_key] 50 | 51 | 52 | class CommandMetrics(six.with_metaclass(CommandMetricsMetaclass, Metrics)): 53 | """ Used by :class:`hystrix.command.Command` to record metrics. 
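
    A minimal illustrative sketch (the subclass name below is hypothetical
    and default properties are assumed)::

        >>> from hystrix.command_properties import CommandProperties
        >>> class MyCommandMetrics(CommandMetrics):
        ...     pass
        >>> metrics = MyCommandMetrics(
        ...     properties=CommandProperties('MyCommand',
        ...                                  CommandProperties.setter()))
        >>> metrics.health_counts().total_requests()
        0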
54 |     """
55 |     command_metrics_key = None
56 | 
57 |     # TODO: Review default value None here
58 |     def __init__(self, command_metrics_key=None, group_key=None,
59 |                  pool_key=None, properties=None, event_notifier=None):
60 |         counter = RollingNumber(
61 |             properties.metrics_rolling_statistical_window_in_milliseconds(),
62 |             properties.metrics_rolling_statistical_window_buckets())
63 |         super(CommandMetrics, self).__init__(counter)
64 |         self.properties = properties
65 |         self.actual_time = ActualTime()
66 |         self.group_key = group_key
67 |         self.event_notifier = event_notifier
68 |         self.health_counts_snapshot = None
69 |         self.last_health_counts_snapshot = AtomicLong(value=self.actual_time.current_time_in_millis())
70 | 
71 |     def mark_success(self, duration):
72 |         """ Mark success, incrementing the counter and emitting an event
73 | 
74 |         When a :class:`hystrix.command.Command` successfully completes it will
75 |         call this method to report its success along with how long the
76 |         execution took.
77 | 
78 |         Args:
79 |             duration: Command duration
80 |         """
81 | 
82 |         # TODO: Why does this receive a parameter and do nothing with it?
83 |         self.event_notifier.mark_event(EventType.SUCCESS, self.command_metrics_key)
84 |         self.counter.increment(RollingNumberEvent.SUCCESS)
85 | 
86 |     def mark_failure(self, duration):
87 |         """ Mark failure, incrementing the counter and emitting an event
88 | 
89 |         When a :class:`hystrix.command.Command` fails to complete it will
90 |         call this method to report its failure along with how long the
91 |         execution took.
92 | 
93 |         Args:
94 |             duration: Command duration
95 |         """
96 | 
97 |         # TODO: Why does this receive a parameter and do nothing with it?
98 |         self.event_notifier.mark_event(EventType.FAILURE, self.command_metrics_key)
99 |         self.counter.increment(RollingNumberEvent.FAILURE)
100 | 
101 |     def mark_timeout(self, duration):
102 |         """ Mark timeout, incrementing the counter and emitting an event
103 | 
104 |         When a :class:`hystrix.command.Command` times out (fails to complete)
105 |         it will call this method to report its failure along with how long the
106 |         command waited (this time should equal or be very close to the timeout
107 |         value).
108 | 
109 |         Args:
110 |             duration: Command duration
111 |         """
112 | 
113 |         # TODO: Why does this receive a parameter and do nothing with it?
114 |         self.event_notifier.mark_event(EventType.TIMEOUT, self.command_metrics_key)
115 |         self.counter.increment(RollingNumberEvent.TIMEOUT)
116 | 
117 |     def mark_bad_request(self, duration):
118 |         """ Mark bad request, incrementing the counter and emitting an event
119 | 
120 |         When a :class:`hystrix.command.Command` is executed and triggers a
121 |         :class:`hystrix.BadRequestException` during its execution it will
122 |         call this method to report its failure along with how long the
123 |         command waited (this time should equal or be very close to the timeout
124 |         value).
125 | 
126 |         Args:
127 |             duration: Command duration
128 |         """
129 | 
130 |         # TODO: Why does this receive a parameter and do nothing with it?
131 |         self.event_notifier.mark_event(EventType.BAD_REQUEST, self.command_metrics_key)
132 |         self.counter.increment(RollingNumberEvent.BAD_REQUEST)
133 | 
134 |     def health_counts(self):
135 |         """ Health counts
136 | 
137 |         Retrieve a snapshot of total requests, error count and error percentage.
138 | 
139 |         Returns:
140 |             instance: :class:`hystrix.command_metrics.HealthCounts`
141 |         """
142 |         # we put an interval between snapshots so high-volume commands don't
143 |         # spend too much unnecessary time calculating metrics in very small time periods
144 |         last_time = self.last_health_counts_snapshot.get()
145 |         current_time = ActualTime().current_time_in_millis()
146 |         if (current_time - last_time) >= self.properties.metrics_health_snapshot_interval_in_milliseconds() or self.health_counts_snapshot is None:
147 |             if self.last_health_counts_snapshot.compare_and_set(last_time, current_time):
148 |                 # Our thread won setting the snapshot time so we will
149 |                 # proceed with generating a new snapshot
150 |                 # losing threads will continue using the old snapshot
151 |                 success = self.counter.rolling_sum(RollingNumberEvent.SUCCESS)
152 |                 failure = self.counter.rolling_sum(RollingNumberEvent.FAILURE)
153 |                 timeout = self.counter.rolling_sum(RollingNumberEvent.TIMEOUT)
154 |                 thread_pool_rejected = self.counter.rolling_sum(RollingNumberEvent.THREAD_POOL_REJECTED)
155 |                 semaphore_rejected = self.counter.rolling_sum(RollingNumberEvent.SEMAPHORE_REJECTED)
156 |                 short_circuited = self.counter.rolling_sum(RollingNumberEvent.SHORT_CIRCUITED)
157 |                 total_count = failure + success + timeout + thread_pool_rejected + short_circuited + semaphore_rejected
158 |                 error_count = failure + timeout + thread_pool_rejected + short_circuited + semaphore_rejected
159 |                 error_percentage = 0
160 | 
161 |                 if total_count > 0:
162 |                     error_percentage = int(error_count / total_count * 100)
163 | 
164 |                 self.health_counts_snapshot = HealthCounts(total_count, error_count, error_percentage)
165 | 
166 |         return self.health_counts_snapshot
167 | 
168 | 
169 | class HealthCounts(object):
170 |     """ Snapshot of request counts during the rolling window.
171 | 
172 |     Holds the total request count, the error count (failure + timeout +
173 |     thread pool rejected + short circuited + semaphore rejected) and the
174 |     resulting error percentage.
175 | 
176 |     """
177 |     def __init__(self, total, error, error_percentage):
178 |         self._total_count = total
179 |         self._error_count = error
180 |         self._error_percentage = error_percentage
181 | 
182 |     def total_requests(self):
183 |         """ Total requests
184 | 
185 |         Returns:
186 |             int: Returns total request count.
187 |         """
188 |         return self._total_count
189 | 
190 |     def error_count(self):
191 |         """ Error count
192 | 
193 |         Returns:
194 |             int: Returns error count.
195 |         """
196 |         return self._error_count
197 | 
198 |     def error_percentage(self):
199 |         """ Error percentage
200 | 
201 |         Returns:
202 |             int: Returns error percentage.
203 |         """
204 |         return self._error_percentage
205 | 
--------------------------------------------------------------------------------
/hystrix/command_properties.py:
--------------------------------------------------------------------------------
1 | from __future__ import absolute_import
2 | import logging
3 | 
4 | log = logging.getLogger(__name__)
5 | 
6 | 
7 | class CommandProperties(object):
8 |     """ Properties for instances of :class:`hystrix.command.Command`
9 |     """
10 | 
11 |     # Default values
12 | 
13 |     # 10000 = 10 seconds (and default of 10 buckets so each bucket is 1
14 |     # second)
15 |     default_metrics_rolling_statistical_window = 10000
16 | 
17 |     # 10 buckets in a 10 second window so each bucket is 1 second
18 |     default_metrics_rolling_statistical_window_buckets = 10
19 | 
20 |     # 20 requests in 10 seconds must occur before statistics matter
21 |     default_circuit_breaker_request_volume_threshold = 20
22 | 
23 |     # 5000 = 5 seconds that we will sleep before trying again after tripping
24 |     # the circuit
25 |     default_circuit_breaker_sleep_window_in_milliseconds = 5000
26 | 
27 |     # 50 = if 50%+ of requests in 10 seconds are failures or latent then we
28 |     # will trip the circuit
29 |     default_circuit_breaker_error_threshold_percentage = 50
30 | 
31 |     # If ``False`` we want to allow traffic.
32 |     default_circuit_breaker_force_open = False
33 | 
34 |     # If ``False`` ignore errors
35 |     default_circuit_breaker_force_closed = False
36 | 
37 |     # 1000 = 1 second timeout
38 |     default_execution_timeout_in_milliseconds = 1000
39 | 
40 |     # Whether a command should be executed in a separate thread or not
41 |     default_execution_isolation_strategy = 0
42 | 
43 |     # Whether a thread should interrupt on timeout.
44 |     default_execution_isolation_thread_interrupt_on_timeout = True
45 | 
46 |     # Whether rolling percentile should be enabled.
47 |     default_metrics_rolling_percentile_enabled = True
48 | 
49 |     # Whether request cache should be enabled
50 |     default_request_cache_enabled = True
51 | 
52 |     # Default fallback isolation semaphore max concurrent requests
53 |     default_fallback_isolation_semaphore_max_concurrent_requests = 10
54 | 
55 |     # Whether fallback should be enabled
56 |     default_fallback_enabled = True
57 | 
58 |     # Default execution isolation semaphore max concurrent requests
59 |     default_execution_isolation_semaphore_max_concurrent_requests = 10
60 | 
61 |     # Whether request log should be enabled
62 |     default_request_log_enabled = True
63 | 
64 |     # Whether circuit breaker should be enabled
65 |     default_circuit_breaker_enabled = True
66 | 
67 |     # Default to 1 minute for
68 |     # :class:`hystrix.rolling_percentile._rolling_percentile`
69 |     default_metrics_rolling_percentile_window = 60000
70 | 
71 |     # Default to 6 buckets (10 seconds each in 60 second window)
72 |     default_metrics_rolling_percentile_window_buckets = 6
73 | 
74 |     # Default to 100 values max per bucket
75 |     default_metrics_rolling_percentile_bucket_size = 100
76 | 
77 |     # Default to 500ms as max frequency between allowing snapshots of health
78 |     # (error percentage etc)
79 |     default_metrics_health_snapshot_interval_in_milliseconds = 500
80 | 
81 |     def __init__(self, command_key, setter, property_prefix=None):
82 |         self.command_key = command_key
83 |         self.property_prefix = property_prefix
84 | 
85 |         # Whether circuit breaker should be enabled
86 |         self._circuit_breaker_enabled = \
87 |             self._property(
88 |                 self.property_prefix, self.command_key,
89 |                 'circuit_breaker.enabled',
90 |                 self.default_circuit_breaker_enabled,
91 |                 setter.circuit_breaker_enabled())
92 | 
93 |         # Number of requests that must be made within a statisticalWindow
94 |         # before open/close decisions are made using stats
95 |         self._circuit_breaker_request_volume_threshold = \
96 |             self._property(
97 |                 self.property_prefix, self.command_key,
98 |                 'circuit_breaker.request_volume_threshold',
99 |                 self.default_circuit_breaker_request_volume_threshold,
100 |                 setter.circuit_breaker_request_volume_threshold())
101 | 
102 |         # Milliseconds after tripping circuit before allowing retry
103 |         self._circuit_breaker_sleep_window_in_milliseconds = \
104 |             self._property(
105 |                 self.property_prefix, self.command_key,
106 |                 'circuit_breaker.sleep_window_in_milliseconds',
107 |                 self.default_circuit_breaker_sleep_window_in_milliseconds,
108 |                 setter.circuit_breaker_sleep_window_in_milliseconds())
109 | 
110 |         # % of 'marks' that must be failed to trip the circuit
111 |         self._circuit_breaker_error_threshold_percentage = \
112 |             self._property(
113 |                 self.property_prefix, self.command_key,
114 |                 'circuit_breaker.error_threshold_percentage',
115 |                 self.default_circuit_breaker_error_threshold_percentage,
116 |                 setter.circuit_breaker_error_threshold_percentage())
117 | 
118 |         # A property to allow forcing the circuit open (stopping all requests)
119 |         self._circuit_breaker_force_open = \
120 |             self._property(
121 |                 self.property_prefix, self.command_key,
122 |                 'circuit_breaker.force_open',
123 |                 self.default_circuit_breaker_force_open,
124 |                 setter.circuit_breaker_force_open())
125 | 
126 |         # A property to allow ignoring errors and therefore never trip 'open'
127 |         # (ie.
allow all traffic through) 128 | self._circuit_breaker_force_closed = \ 129 | self._property( 130 | self.property_prefix, self.command_key, 131 | 'circuit_breaker.force_closed', 132 | self.default_circuit_breaker_force_closed, 133 | setter.circuit_breaker_force_closed()) 134 | 135 | # Whether a command should be executed in a separate thread or not 136 | self._execution_isolation_strategy = \ 137 | self._property( 138 | self.property_prefix, self.command_key, 139 | 'execution.isolation.strategy', 140 | self.default_execution_isolation_strategy, 141 | setter.execution_isolation_strategy()) 142 | 143 | # Timeout value in milliseconds for a command 144 | self._execution_timeout_in_milliseconds = \ 145 | self._property( 146 | self.property_prefix, self.command_key, 147 | 'execution.isolation.thread.timeout_in_milliseconds', 148 | self.default_execution_timeout_in_milliseconds, 149 | setter.execution_timeout_in_milliseconds()) 150 | 151 | # execution_isolation_thread_pool_key_override 152 | 153 | # Number of permits for execution semaphore 154 | self._execution_isolation_semaphore_max_concurrent_requests = \ 155 | self._property( 156 | self.property_prefix, self.command_key, 157 | 'execution.isolation.semaphore.max_concurrent_requests', 158 | self.default_execution_isolation_semaphore_max_concurrent_requests, 159 | setter.execution_isolation_semaphore_max_concurrent_requests()) 160 | 161 | # Number of permits for fallback semaphore 162 | self._fallback_isolation_semaphore_max_concurrent_requests = \ 163 | self._property( 164 | self.property_prefix, self.command_key, 165 | 'fallback.isolation.semaphore.max_concurrent_requests', 166 | self.default_fallback_isolation_semaphore_max_concurrent_requests, 167 | setter.fallback_isolation_semaphore_max_concurrent_requests()) 168 | 169 | # Whether fallback should be attempted 170 | self._fallback_enabled = \ 171 | self._property( 172 | self.property_prefix, self.command_key, 'fallback.enabled', 173 | self.default_fallback_enabled, 174 | setter.fallback_enabled()) 175 | 176 | # Whether an underlying Future/Thread 177 | # (when runInSeparateThread == true) should be interrupted after a 178 | # timeout 179 | self._execution_isolation_thread_interrupt_on_timeout = \ 180 | self._property( 181 | self.property_prefix, self.command_key, 182 | 'execution.isolation.thread.interrupt_on_timeout', 183 | self.default_execution_isolation_thread_interrupt_on_timeout, 184 | setter.execution_isolation_thread_interrupt_on_timeout()) 185 | 186 | # Milliseconds back that will be tracked 187 | self._metrics_rolling_statistical_window_in_milliseconds = \ 188 | self._property( 189 | self.property_prefix, self.command_key, 190 | 'metrics.rolling_stats.time_in_milliseconds', 191 | self.default_metrics_rolling_statistical_window, 192 | setter.metrics_rolling_statistical_window_in_milliseconds()) 193 | 194 | # number of buckets in the statisticalWindow 195 | self._metrics_rolling_statistical_window_buckets = \ 196 | self._property( 197 | self.property_prefix, self.command_key, 198 | 'metrics.rolling_stats.num_buckets', 199 | self.default_metrics_rolling_statistical_window_buckets, 200 | setter.metrics_rolling_statistical_window_buckets()) 201 | 202 | # Whether monitoring should be enabled (SLA and Tracers) 203 | self._metrics_rolling_percentile_enabled = \ 204 | self._property( 205 | self.property_prefix, 206 | self.command_key, 'metrics.rolling_percentile.enabled', 207 | self.default_metrics_rolling_percentile_enabled, 208 | setter.metrics_rolling_percentile_enabled()) 209 | 210 | 
# Number of milliseconds that will be tracked in 211 | # :class:`hystrix.rolling_percentile.RollingPercentile` 212 | self._metrics_rolling_percentile_window_in_milliseconds = \ 213 | self._property( 214 | self.property_prefix, self.command_key, 215 | 'metrics.rolling_percentile.time_in_milliseconds', 216 | self.default_metrics_rolling_percentile_window, 217 | setter.metrics_rolling_percentile_window_in_milliseconds()) 218 | 219 | # Number of buckets percentileWindow will be divided into 220 | self._metrics_rolling_percentile_window_buckets = \ 221 | self._property( 222 | self.property_prefix, self.command_key, 223 | 'metrics.rolling_percentile.num_buckets', 224 | self.default_metrics_rolling_percentile_window_buckets, 225 | setter.metrics_rolling_percentile_window_buckets()) 226 | 227 | # How many values will be stored in each 228 | # :attr:`percentile_window_bucket` 229 | self._metrics_rolling_percentile_bucket_size = \ 230 | self._property( 231 | self.property_prefix, self.command_key, 232 | 'metrics.rolling_percentile.bucket_size', 233 | self.default_metrics_rolling_percentile_bucket_size, 234 | setter.metrics_rolling_percentile_bucket_size()) 235 | 236 | # Time between health snapshots 237 | self._metrics_health_snapshot_interval_in_milliseconds = \ 238 | self._property( 239 | self.property_prefix, self.command_key, 240 | 'metrics.health_snapshot.interval_in_milliseconds', 241 | self.default_metrics_health_snapshot_interval_in_milliseconds, 242 | setter.metrics_health_snapshot_interval_in_milliseconds()) 243 | 244 | # Whether command request logging is enabled 245 | self._request_log_enabled = \ 246 | self._property( 247 | property_prefix, self.command_key, 'request_log.enabled', 248 | self.default_request_log_enabled, 249 | setter.request_log_enabled()) 250 | 251 | # Whether request caching is enabled 252 | self._request_cache_enabled = \ 253 | self._property( 254 | self.property_prefix, self.command_key, 255 | 'request_cache.enabled', 256 | self.default_request_cache_enabled, 257 | setter.request_cache_enabled()) 258 | 259 | # threadpool doesn't have a global override, only instance level 260 | # makes sense 261 | # self.execution_isolation_thread_pool_key_override = \ 262 | # as__property( 263 | # DynamicStringProperty( 264 | # '{`.command.{`.thread_pool_key_override'.format( 265 | # self.property_prefix, self.command_key), None)) 266 | 267 | def circuit_breaker_enabled(self): 268 | """ Whether to use a :class:`hystrix.CircuitBreaker` or not. If false no 269 | circuit-breaker logic will be used and all requests permitted. 270 | 271 | This is similar in effect to :class:`#circuitBreakerForceClosed()` 272 | except that continues tracking metrics and knowing whether it should be 273 | open/closed, this property results in not even instantiating a 274 | circuit-breaker. 275 | 276 | Returns: 277 | bool: ``True`` or ``False`` 278 | """ 279 | return self._circuit_breaker_enabled 280 | 281 | def circuit_breaker_error_threshold_percentage(self): 282 | """ Error percentage threshold (as whole number such as 50) at which 283 | point the circuit breaker will trip open and reject requests. 284 | 285 | It will stay tripped for the duration defined in 286 | :class:`#circuitBreakerSleepWindowInMilliseconds()`; 287 | 288 | The error percentage this is compared against comes from 289 | :class:`hystrix.CommandMetrics#getHealthCounts()`. 
290 | 291 | Returns: 292 | int: Error percentage 293 | """ 294 | return self._circuit_breaker_error_threshold_percentage 295 | 296 | def circuit_breaker_force_closed(self): 297 | """ If true the :class:`hystrix.CircuitBreaker#allowRequest()` will 298 | always return true to allow requests regardless of the error percentage 299 | from :class:`hystrix.CommandMetrics#getHealthCounts()`. 300 | 301 | The :class:`#circuitBreakerForceOpen()` property takes precedence so 302 | if it set to true this property does nothing. 303 | 304 | Returns: 305 | bool: ``True`` or ``False`` 306 | """ 307 | return self._circuit_breaker_force_closed 308 | 309 | def circuit_breaker_force_open(self): 310 | """ If true the :class:`hystrix.CircuitBreaker#allowRequest()` will 311 | always return false, causing the circuit to be open (tripped) and 312 | reject all requests. 313 | 314 | This property takes precedence over 315 | :class:`#circuitBreakerForceClosed()`; 316 | 317 | Returns: 318 | bool: ``True`` or ``False`` 319 | """ 320 | return self._circuit_breaker_force_open 321 | 322 | def circuit_breaker_request_volume_threshold(self): 323 | """ Minimum number of requests in the 324 | :class:`#metricsRollingStatisticalWindowInMilliseconds()` that must 325 | exist before the :class:`hystrix.CircuitBreaker` will trip. 326 | 327 | If below this number the circuit will not trip regardless of error 328 | percentage. 329 | 330 | Returns: 331 | int: Number of request 332 | """ 333 | return self._circuit_breaker_request_volume_threshold 334 | 335 | def circuit_breaker_sleep_window_in_milliseconds(self): 336 | """ The time in milliseconds after a :class:`hystrix.CircuitBreaker` 337 | trips open that it should wait before trying requests again. 338 | 339 | Returns: 340 | int: Time in milliseconds 341 | """ 342 | return self._circuit_breaker_sleep_window_in_milliseconds 343 | 344 | def execution_isolation_semaphore_max_concurrent_requests(self): 345 | """ Number of concurrent requests permitted to 346 | :class:`hystrix.Command#run()`. Requests beyond the concurrent limit 347 | will be rejected. 348 | 349 | Applicable only when: 350 | 351 | :class:`#executionIsolationStrategy()` == SEMAPHORE. 352 | 353 | Returns: 354 | int: Number of concurrent requests 355 | """ 356 | return self._execution_isolation_semaphore_max_concurrent_requests 357 | 358 | def execution_isolation_strategy(self): 359 | """ What isolation strategy :class:`hystrix.Command#run()` will be 360 | executed with. 361 | 362 | If :class:`ExecutionIsolationStrategy#THREAD` then it will be executed 363 | on a separate thread and concurrent requests limited by the number of 364 | threads in the thread-pool. 365 | 366 | If :class:`ExecutionIsolationStrategy#SEMAPHORE` then it will be 367 | executed on the calling thread and concurrent requests limited by the 368 | semaphore count. 369 | 370 | Returns: 371 | bool: ``True`` or ``False`` 372 | """ 373 | return self._execution_isolation_strategy 374 | 375 | def execution_isolation_thread_interrupt_on_timeout(self): 376 | """ Whether the execution thread should attempt an interrupt 377 | (using :class:`Future#cancel`) when a thread times out. 378 | 379 | Applicable only when :class:`#executionIsolationStrategy()` == THREAD. 380 | 381 | Returns: 382 | bool: ``True`` or ``False`` 383 | """ 384 | return self._execution_isolation_thread_interrupt_on_timeout 385 | 386 | def execution_timeout_in_milliseconds(self): 387 | """ Time in milliseconds at which point the command will timeout and 388 | halt execution. 
389 | 390 | If :class:`#executionIsolationThreadInterruptOnTimeout` == true and the 391 | command is thread-isolated, the executing thread will be interrupted. 392 | If the command is semaphore-isolated and a 393 | :class:`hystrix.ObservableCommand`, that command will get unsubscribed. 394 | 395 | Returns: 396 | int: Time in milliseconds 397 | """ 398 | return self._execution_timeout_in_milliseconds 399 | 400 | def fallback_isolation_semaphore_max_concurrent_requests(self): 401 | """ Number of concurrent requests permitted to 402 | :class:`hystrix.Command#getFallback()`. Requests beyond the concurrent 403 | limit will fail-fast and not attempt retrieving a fallback. 404 | 405 | Returns: 406 | int: Number of concurrent requests 407 | """ 408 | return self._fallback_isolation_semaphore_max_concurrent_requests 409 | 410 | def fallback_enabled(self): 411 | """ Whether :class:`hystrix.Command#getFallback()` should be attempted 412 | when failure occurs. 413 | 414 | Returns: 415 | bool: ``True`` or ``False`` 416 | """ 417 | return self._fallback_enabled 418 | 419 | def metrics_health_snapshot_interval_in_milliseconds(self): 420 | """ Time in milliseconds to wait between allowing health snapshots to 421 | be taken that calculate success and error percentages and affect 422 | :class:`hystrix.CircuitBreaker#isOpen()` status. 423 | 424 | On high-volume circuits the continual calculation of error percentage 425 | can become CPU intensive thus this controls how often it is 426 | calculated. 427 | 428 | Returns: 429 | int: Time in milliseconds 430 | """ 431 | return self._metrics_health_snapshot_interval_in_milliseconds 432 | 433 | def metrics_rolling_percentile_bucket_size(self): 434 | """ Maximum number of values stored in each bucket of the rolling 435 | percentile. This is passed into :class:`hystrix.RollingPercentile` 436 | inside :class:`hystrix.CommandMetrics`. 437 | 438 | Returns: 439 | int: Maximum number of values stored in each bucket 440 | """ 441 | return self._metrics_rolling_percentile_bucket_size 442 | 443 | def metrics_rolling_percentile_enabled(self): 444 | """ Whether percentile metrics should be captured using 445 | :class:`hystrix.RollingPercentile` inside 446 | :class:`hystrix.CommandMetrics`. 447 | 448 | Returns: 449 | bool: ``True`` or ``False`` 450 | """ 451 | return self._metrics_rolling_percentile_enabled 452 | 453 | def metrics_rolling_percentile_window_in_milliseconds(self): 454 | """ Duration of percentile rolling window in milliseconds. This is 455 | passed into :class:`hystrix.RollingPercentile` inside 456 | :class:`hystrix.CommandMetrics`. 457 | 458 | Returns: 459 | int: Milliseconds 460 | """ 461 | return self._metrics_rolling_percentile_window_in_milliseconds 462 | 463 | def metrics_rolling_percentile_window_buckets(self): 464 | """ Number of buckets the rolling percentile window is broken into. 465 | This is passed into :class:`hystrix.RollingPercentile` inside 466 | :class:`hystrix.CommandMetrics`. 467 | 468 | Returns: 469 | int: Buckets 470 | """ 471 | return self._metrics_rolling_percentile_window_buckets 472 | 473 | def metrics_rolling_statistical_window_in_milliseconds(self): 474 | """ Duration of statistical rolling window in milliseconds. This is 475 | passed into :class:`hystrix.RollingNumber` inside 476 | :class:`hystrix.CommandMetrics`. 
477 | 478 | Returns: 479 | int: Milliseconds 480 | """ 481 | return self._metrics_rolling_statistical_window_in_milliseconds 482 | 483 | def metrics_rolling_statistical_window_buckets(self): 484 | """ Number of buckets the rolling statistical window is broken into. 485 | This is passed into :class:`hystrix.RollingNumber` inside 486 | :class:`hystrix.CommandMetrics`. 487 | 488 | Returns: 489 | int: Buckets 490 | """ 491 | return self._metrics_rolling_statistical_window_buckets 492 | 493 | def request_cache_enabled(self): 494 | """ Whether :class:`hystrix.Command.getCacheKey()` should be used with 495 | :class:`hystrix.RequestCache` to provide de-duplication functionality 496 | via request-scoped caching. 497 | 498 | Returns: 499 | bool: ``True`` or ``False`` 500 | """ 501 | return self._request_cache_enabled 502 | 503 | def request_log_enabled(self): 504 | """ Whether :class:`hystrix.command.Command` execution and events 505 | should be logged to :class:`hystrix.request.RequestLog`. 506 | 507 | Returns: 508 | bool: ``True`` or ``False`` 509 | """ 510 | return self._request_log_enabled 511 | 512 | def _property(self, property_prefix, command_key, instance_property, 513 | default_value, setter_override_value=None): 514 | """ Get property from a networked plugin 515 | """ 516 | 517 | # The setter override should take precedence over default_value 518 | if setter_override_value is not None: 519 | return setter_override_value 520 | else: 521 | return default_value 522 | 523 | @classmethod 524 | def setter(klass): 525 | """ Factory method to retrieve the default Setter """ 526 | return klass.Setter() 527 | 528 | class Setter(object): 529 | """ Fluent interface that allows chained setting of properties 530 | 531 | That can be passed into a :class:`hystrix.command.Command` constructor 532 | to inject instance specific property overrides. 
533 | 534 | Example:: 535 | 536 | >>> CommandProperties.setter() 537 | .with_execution_timeout_in_milliseconds(100) 538 | .with_execute_command_on_separate_thread(True) 539 | """ 540 | 541 | def __init__(self): 542 | self._circuit_breaker_enabled = None 543 | self._circuit_breaker_error_threshold_percentage = None 544 | self._circuit_breaker_force_closed = None 545 | self._circuit_breaker_force_open = None 546 | self._circuit_breaker_request_volume_threshold = None 547 | self._circuit_breaker_sleep_window_in_milliseconds = None 548 | self._execution_isolation_semaphore_max_concurrent_requests = None 549 | self._execution_isolation_strategy = None 550 | self._execution_isolation_thread_interrupt_on_timeout = None 551 | self._execution_timeout_in_milliseconds = None 552 | self._fallback_isolation_semaphore_max_concurrent_requests = None 553 | self._fallback_enabled = None 554 | self._metrics_health_snapshot_interval_in_milliseconds = None 555 | self._metrics_rolling_percentile_bucket_size = None 556 | self._metrics_rolling_percentile_enabled = None 557 | self._metrics_rolling_percentile_window_in_milliseconds = None 558 | self._metrics_rolling_percentile_window_buckets = None 559 | self._metrics_rolling_statistical_window_in_milliseconds = None 560 | self._metrics_rolling_statistical_window_buckets = None 561 | self._request_cache_enabled = None 562 | self._request_log_enabled = None 563 | 564 | def circuit_breaker_enabled(self): 565 | return self._circuit_breaker_enabled 566 | 567 | def circuit_breaker_error_threshold_percentage(self): 568 | return self._circuit_breaker_error_threshold_percentage 569 | 570 | def circuit_breaker_force_closed(self): 571 | return self._circuit_breaker_force_closed 572 | 573 | def circuit_breaker_force_open(self): 574 | return self._circuit_breaker_force_open 575 | 576 | def circuit_breaker_request_volume_threshold(self): 577 | return self._circuit_breaker_request_volume_threshold 578 | 579 | def circuit_breaker_sleep_window_in_milliseconds(self): 580 | return self._circuit_breaker_sleep_window_in_milliseconds 581 | 582 | def execution_isolation_semaphore_max_concurrent_requests(self): 583 | return self._execution_isolation_semaphore_max_concurrent_requests 584 | 585 | def execution_isolation_strategy(self): 586 | return self._execution_isolation_strategy 587 | 588 | def execution_isolation_thread_interrupt_on_timeout(self): 589 | return self._execution_isolation_thread_interrupt_on_timeout 590 | 591 | def execution_timeout_in_milliseconds(self): 592 | return self._execution_timeout_in_milliseconds 593 | 594 | def fallback_isolation_semaphore_max_concurrent_requests(self): 595 | return self._fallback_isolation_semaphore_max_concurrent_requests 596 | 597 | def fallback_enabled(self): 598 | return self._fallback_enabled 599 | 600 | def metrics_health_snapshot_interval_in_milliseconds(self): 601 | return self._metrics_health_snapshot_interval_in_milliseconds 602 | 603 | def metrics_rolling_percentile_bucket_size(self): 604 | return self._metrics_rolling_percentile_bucket_size 605 | 606 | def metrics_rolling_percentile_enabled(self): 607 | return self._metrics_rolling_percentile_enabled 608 | 609 | def metrics_rolling_percentile_window_in_milliseconds(self): 610 | return self._metrics_rolling_percentile_window_in_milliseconds 611 | 612 | def metrics_rolling_percentile_window_buckets(self): 613 | return self._metrics_rolling_percentile_window_buckets 614 | 615 | def metrics_rolling_statistical_window_in_milliseconds(self): 616 | return 
self._metrics_rolling_statistical_window_in_milliseconds 617 | 618 | def metrics_rolling_statistical_window_buckets(self): 619 | return self._metrics_rolling_statistical_window_buckets 620 | 621 | def request_cache_enabled(self): 622 | return self._request_cache_enabled 623 | 624 | def request_log_enabled(self): 625 | return self._request_log_enabled 626 | 627 | def with_circuit_breaker_enabled(self, value): 628 | self._circuit_breaker_enabled = value 629 | return self 630 | 631 | def with_circuit_breaker_error_threshold_percentage(self, value): 632 | self._circuit_breaker_error_threshold_percentage = value 633 | return self 634 | 635 | def with_circuit_breaker_force_closed(self, value): 636 | self._circuit_breaker_force_closed = value 637 | return self 638 | 639 | def with_circuit_breaker_force_open(self, value): 640 | self._circuit_breaker_force_open = value 641 | return self 642 | 643 | def with_circuit_breaker_request_volume_threshold(self, value): 644 | self._circuit_breaker_request_volume_threshold = value 645 | return self 646 | 647 | def with_circuit_breaker_sleep_window_in_milliseconds(self, value): 648 | self._circuit_breaker_sleep_window_in_milliseconds = value 649 | return self 650 | 651 | def with_execution_isolation_semaphore_max_concurrent_requests(self, value): 652 | self._execution_isolation_semaphore_max_concurrent_requests = value 653 | return self 654 | 655 | def with_execution_isolation_strategy(self, value): 656 | self._execution_isolation_strategy = value 657 | return self 658 | 659 | def with_execution_isolation_thread_interrupt_on_timeout(self, value): 660 | self._execution_isolation_thread_interrupt_on_timeout = value 661 | return self 662 | 663 | def with_execution_timeout_in_milliseconds(self, value): 664 | self._execution_timeout_in_milliseconds = value 665 | return self 666 | 667 | def with_fallback_isolation_semaphore_max_concurrent_requests(self, value): 668 | self._fallback_isolation_semaphore_max_concurrent_requests = value 669 | return self 670 | 671 | def with_fallback_enabled(self, value): 672 | self._fallback_enabled = value 673 | return self 674 | 675 | def with_metrics_health_snapshot_interval_in_milliseconds(self, value): 676 | self._metrics_health_snapshot_interval_in_milliseconds = value 677 | return self 678 | 679 | def with_metrics_rolling_percentile_bucket_size(self, value): 680 | self._metrics_rolling_percentile_bucket_size = value 681 | return self 682 | 683 | def with_metrics_rolling_percentile_enabled(self, value): 684 | self._metrics_rolling_percentile_enabled = value 685 | return self 686 | 687 | def with_metrics_rolling_percentile_window_in_milliseconds(self, value): 688 | self._metrics_rolling_percentile_window_in_milliseconds = value 689 | return self 690 | 691 | def with_metrics_rolling_percentile_window_buckets(self, value): 692 | self._metrics_rolling_percentile_window_buckets = value 693 | return self 694 | 695 | def with_metrics_rolling_statistical_window_in_milliseconds(self, value): 696 | self._metrics_rolling_statistical_window_in_milliseconds = value 697 | return self 698 | 699 | def with_metrics_rolling_statistical_window_buckets(self, value): 700 | self._metrics_rolling_statistical_window_buckets = value 701 | return self 702 | 703 | def with_request_cache_enabled(self, value): 704 | self._request_cache_enabled = value 705 | return self 706 | 707 | def with_request_log_enabled(self, value): 708 | self._request_log_enabled = value 709 | return self 710 | 
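
# Illustrative usage sketch (the command name below is hypothetical). Values
# set through the fluent Setter take precedence over the class-level defaults
# resolved in _property():
#
#     setter = (CommandProperties.setter()
#               .with_execution_timeout_in_milliseconds(100))
#     properties = CommandProperties('MyCommand', setter)
#     assert properties.execution_timeout_in_milliseconds() == 100
#     assert properties.circuit_breaker_enabled() is True  # default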
-------------------------------------------------------------------------------- /hystrix/event_type.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from enum import Enum 3 | import logging 4 | 5 | 6 | log = logging.getLogger(__name__) 7 | 8 | 9 | class EventType(Enum): 10 | """ Various states/events that execution can result in or have tracked. 11 | 12 | These are most often accessed via :class:`hystrix.request_log.RequestLog` 13 | or :meth:`hystrix.command.Command.execution_events()`. 14 | """ 15 | 16 | EMIT = 1 17 | SUCCESS = 2 18 | FAILURE = 3 19 | TIMEOUT = 4 20 | SHORT_CIRCUITED = 5 21 | THREAD_POOL_REJECTED = 6 22 | SEMAPHORE_REJECTED = 7 23 | FALLBACK_EMIT = 8 24 | FALLBACK_SUCCESS = 9 25 | FALLBACK_FAILURE = 10 26 | FALLBACK_REJECTION = 11 27 | EXCEPTION_THROWN = 12 28 | RESPONSE_FROM_CACHE = 13 29 | COLLAPSED = 14 30 | BAD_REQUEST = 15 31 | -------------------------------------------------------------------------------- /hystrix/group.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import logging 3 | 4 | import six 5 | 6 | from .pool import Pool 7 | 8 | log = logging.getLogger(__name__) 9 | 10 | 11 | class GroupMetaclass(type): 12 | 13 | __instances__ = dict() 14 | __blacklist__ = ('Group', 'GroupMetaclass') 15 | 16 | def __new__(cls, name, bases, attrs): 17 | 18 | if name in cls.__blacklist__: 19 | return super(GroupMetaclass, cls).__new__(cls, name, bases, attrs) 20 | 21 | group_key = attrs.get('group_key') or '{}Group'.format(name) 22 | new_class = super(GroupMetaclass, cls).__new__(cls, group_key, 23 | bases, attrs) 24 | 25 | pool_key = attrs.get('poll_key') or '{}Pool'.format(group_key) 26 | NewPool = type(pool_key, (Pool,), 27 | dict(pool_key=pool_key)) 28 | 29 | setattr(new_class, 'pool', NewPool()) 30 | setattr(new_class, 'pool_key', pool_key) 31 | setattr(new_class, 'group_key', group_key) 32 | 33 | if group_key not in cls.__instances__: 34 | cls.__instances__[group_key] = new_class 35 | 36 | return cls.__instances__[group_key] 37 | 38 | 39 | class Group(six.with_metaclass(GroupMetaclass, object)): 40 | 41 | group_key = None 42 | pool_key = None 43 | -------------------------------------------------------------------------------- /hystrix/metrics.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import logging 3 | 4 | log = logging.getLogger(__name__) 5 | 6 | 7 | # TODO: Rename this to AbstractMetrics 8 | class Metrics(object): 9 | """ Base class for metrics 10 | 11 | Args: 12 | counter (:class:`hystrix.rolling_number.RollingNumber`): Used to 13 | increment or set values over time. 14 | """ 15 | 16 | def __init__(self, counter): 17 | self.counter = counter 18 | 19 | def cumulative_count(self, event): 20 | """ Cumulative count 21 | 22 | Get the **cumulative** count since the start of the application for the 23 | given :class:`RollingNumberEvent`. 24 | 25 | Args: 26 | event (:class:`RollingNumberEvent`): The Event to retrieve a 27 | **sum** for. 28 | 29 | Returns: 30 | long: Returns the long cumulative count. 31 | """ 32 | return self.counter.cumulative_sum(event) 33 | 34 | def rolling_count(self, event): 35 | """ **Rolling** count 36 | 37 | Get the rolling count for the given:class:`RollingNumberEvent`. 38 | 39 | Args: 40 | event (:class:`RollingNumberEvent`): The Event to retrieve a 41 | **sum** for. 
42 | 43 | Returns: 44 | long: Returns the long cumulative count. 45 | """ 46 | return self.counter.rolling_sum(event) 47 | -------------------------------------------------------------------------------- /hystrix/pool.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from concurrent.futures import ProcessPoolExecutor 3 | import logging 4 | 5 | import six 6 | 7 | log = logging.getLogger(__name__) 8 | 9 | 10 | class PoolMetaclass(type): 11 | 12 | __instances__ = dict() 13 | __blacklist__ = ('Pool', 'PoolMetaclass') 14 | 15 | def __new__(cls, name, bases, attrs): 16 | 17 | if name in cls.__blacklist__: 18 | return super(PoolMetaclass, cls).__new__(cls, name, 19 | bases, attrs) 20 | 21 | pool_key = attrs.get('pool_key') or '{}Pool'.format(name) 22 | new_class = super(PoolMetaclass, cls).__new__(cls, pool_key, 23 | bases, attrs) 24 | 25 | setattr(new_class, 'pool_key', pool_key) 26 | 27 | if pool_key not in cls.__instances__: 28 | cls.__instances__[pool_key] = new_class 29 | 30 | return cls.__instances__[pool_key] 31 | 32 | 33 | class Pool(six.with_metaclass(PoolMetaclass, ProcessPoolExecutor)): 34 | 35 | pool_key = None 36 | 37 | def __init__(self, pool_key=None, max_workers=5): 38 | super(Pool, self).__init__(max_workers) 39 | -------------------------------------------------------------------------------- /hystrix/pool_metrics.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import logging 3 | 4 | import six 5 | 6 | log = logging.getLogger(__name__) 7 | 8 | 9 | class PoolMetricsMetaclass(type): 10 | 11 | __instances__ = dict() 12 | __blacklist = ('PoolMetrics', 'PoolMetricsMetaclass') 13 | 14 | def __new__(cls, name, bases, attrs): 15 | 16 | if name in cls.__blacklist: 17 | return super(PoolMetricsMetaclass, cls).__new__(cls, name, 18 | bases, attrs) 19 | 20 | pool_metrics_key = attrs.get('pool_metrics_key') or \ 21 | '{}PoolMetrics'.format(name) 22 | 23 | new_class = super(PoolMetricsMetaclass, cls).__new__(cls, 24 | pool_metrics_key, 25 | bases, attrs) 26 | setattr(new_class, 'pool_metrics_key', pool_metrics_key) 27 | 28 | if pool_metrics_key not in cls.__instances__: 29 | cls.__instances__[pool_metrics_key] = new_class 30 | 31 | return cls.__instances__[pool_metrics_key] 32 | 33 | 34 | class PoolMetrics(six.with_metaclass(PoolMetricsMetaclass, object)): 35 | 36 | pool_metrics_key = None 37 | -------------------------------------------------------------------------------- /hystrix/rolling_number.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from multiprocessing import RLock 3 | from collections import deque 4 | import logging 5 | import types 6 | import time 7 | 8 | import six 9 | 10 | from atomos.multiprocessing.atomic import AtomicLong 11 | 12 | log = logging.getLogger(__name__) 13 | 14 | 15 | class RollingNumber(object): 16 | """ A **number** which can be used to track **counters** (increment) or set 17 | values over time. 
18 | 19 | It is *rolling* in the sense that a :attr:`milliseconds` is 20 | given that you want to track (such as 10 seconds) and then that is broken 21 | into **buckets** (defaults to 10) so that the 10 second window doesn't 22 | empty out and restart every 10 seconds, but instead every 1 second you 23 | have a new :class:`Bucket` added and one dropped so that 9 of the buckets 24 | remain and only the newest starts from scratch. 25 | 26 | This is done so that the statistics are gathered over a *rolling* 10 27 | second window with data being added/dropped in 1 second intervals 28 | (or whatever granularity is defined by the arguments) rather than 29 | each 10 second window starting at 0 again. 30 | 31 | Performance-wise this class is optimized for writes, not reads. This is 32 | done because it expects far higher write volume (thousands/second) than 33 | reads (a few per second). 34 | 35 | For example, on each read to getSum/getCount it will iterate buckets to 36 | sum the data so that on writes we don't need to maintain the overall sum 37 | and pay the synchronization cost at each write to ensure the sum is 38 | up-to-date when the read can easily iterate each bucket to get the sum 39 | when it needs it. 40 | 41 | See test module :mod:`tests.test_rolling_number` for usage and expected 42 | behavior examples. 43 | """ 44 | 45 | def __init__(self, milliseconds, bucket_numbers, _time=None): 46 | self.time = _time or ActualTime() # Create a instance of time here 47 | self.milliseconds = milliseconds 48 | self.buckets = BucketCircular(bucket_numbers) 49 | self.bucket_numbers = bucket_numbers 50 | self.cumulative = CumulativeSum() 51 | self._new_bucket_lock = RLock() 52 | 53 | if self.milliseconds % self.bucket_numbers != 0: 54 | raise Exception('The milliseconds must divide equally into ' 55 | 'bucket_numbers. For example 1000/10 is ok, ' 56 | '1000/11 is not.') 57 | 58 | def buckets_size_in_milliseconds(self): 59 | return self.milliseconds / self.bucket_numbers 60 | 61 | def increment(self, event): 62 | """ Increment the **counter** in the current bucket by one for the 63 | given :class:`RollingNumberEvent` type. 64 | 65 | 66 | The :class:`RollingNumberEvent` must be a **counter** type 67 | 68 | >>> RollingNumberEvent.isCounter() 69 | True 70 | 71 | Args: 72 | event (:class:`RollingNumberEvent`): Event defining which 73 | **counter** to increment. 74 | """ 75 | self.current_bucket().adder(event).increment() 76 | 77 | def update_rolling_max(self, event, value): 78 | """ Update a value and retain the max value. 79 | 80 | The :class:`RollingNumberEvent` must be a **max updater** type 81 | 82 | >>> RollingNumberEvent.isMaxUpdater() 83 | True 84 | 85 | Args: 86 | value (int): Max value to update. 87 | event (:class:`RollingNumberEvent`): Event defining which 88 | **counter** to increment. 89 | 90 | """ 91 | self.current_bucket().max_updater(event).update(value) 92 | 93 | def current_bucket(self): 94 | """ Retrieve the current :class:`Bucket` 95 | 96 | Retrieve the latest :class:`Bucket` if the given time is **BEFORE** 97 | the end of the **bucket** window, otherwise it returns ``None``. 98 | 99 | The following needs to be synchronized/locked even with a 100 | synchronized/thread-safe data structure such as LinkedBlockingDeque 101 | because the logic involves multiple steps to check existence, 102 | create an object then insert the object. 
The 'check' or 'insertion' 103 | themselves are thread-safe by themselves but not the aggregate 104 | algorithm, thus we put this entire block of logic inside 105 | synchronized. 106 | 107 | I am using a :class:`multiprocessing.RLock` if/then 108 | so that a single thread will get the lock and as soon as one thread 109 | gets the lock all others will go the 'else' block and just return 110 | the currentBucket until the newBucket is created. This should allow 111 | the throughput to be far higher and only slow down 1 thread instead 112 | of blocking all of them in each cycle of creating a new bucket based 113 | on some testing (and it makes sense that it should as well). 114 | 115 | This means the timing won't be exact to the millisecond as to what 116 | data ends up in a bucket, but that's acceptable. It's not critical 117 | to have exact precision to the millisecond, as long as it's rolling, 118 | if we can instead reduce the impact synchronization. 119 | 120 | More importantly though it means that the 'if' block within the 121 | lock needs to be careful about what it changes that can still 122 | be accessed concurrently in the 'else' block since we're not 123 | completely synchronizing access. 124 | 125 | For example, we can't have a multi-step process to add a bucket, 126 | remove a bucket, then update the sum since the 'else' block of code 127 | can retrieve the sum while this is all happening. The trade-off is 128 | that we don't maintain the rolling sum and let readers just iterate 129 | bucket to calculate the sum themselves. This is an example of 130 | favoring write-performance instead of read-performance and how the 131 | tryLock versus a synchronized block needs to be accommodated. 132 | 133 | Returns: 134 | bucket: Returns the latest :class:`Bucket` or ``None``. 135 | """ 136 | 137 | # TODO: Check the doc string above^. 138 | current_time = self.time.current_time_in_millis() 139 | 140 | # a shortcut to try and get the most common result of immediately 141 | # finding the current bucket 142 | 143 | # Retrieve the latest bucket if the given time is BEFORE the end of 144 | # the bucket window, otherwise it returns None. 145 | 146 | # NOTE: This is thread-safe because it's accessing 'buckets' which is 147 | # a ?LinkedBlockingDeque? 148 | current_bucket = self.buckets.peek_last() 149 | if current_bucket is not None and current_time < (current_bucket.window_start + self.buckets_size_in_milliseconds()): 150 | return current_bucket 151 | 152 | with self._new_bucket_lock: 153 | # If we didn't find the current bucket above, then we have to 154 | # create one. 
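            # If there is no bucket yet, create the very first one; otherwise
            # advance the window one bucket at a time, folding each completed
            # bucket into the cumulative sum, until the current time falls
            # inside the newest bucket (or reset everything if the data is
            # older than the whole rolling window).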
155 |             if self.buckets.peek_last() is None:
156 |                 new_bucket = Bucket(current_time)
157 |                 self.buckets.add_last(new_bucket)
158 |                 return new_bucket
159 |             else:
160 |                 for i in range(self.bucket_numbers):
161 |                     last_bucket = self.buckets.peek_last()
162 |                     if current_time < (last_bucket.window_start + self.buckets_size_in_milliseconds()):
163 |                         return last_bucket
164 |                     elif current_time - (last_bucket.window_start + self.buckets_size_in_milliseconds()) > self.milliseconds:
165 |                         self.reset()
166 |                         return self.current_bucket()
167 |                     else:
168 |                         self.buckets.add_last(Bucket(last_bucket.window_start + self.buckets_size_in_milliseconds()))
169 |                         self.cumulative.add_bucket(last_bucket)
170 | 
171 |                 return self.buckets.peek_last()
172 | 
173 |         # we didn't get the lock so just return the latest bucket while
174 |         # another thread creates the next one
175 |         current_bucket = self.buckets.peek_last()
176 |         if current_bucket is not None:
177 |             return current_bucket
178 |         else:
179 |             # In the rare scenario where multiple threads raced to create
180 |             # the very first bucket, wait slightly and then recurse while
181 |             # the other thread finishes creating a bucket
182 |             time.sleep(0.005)  # sleep briefly (~5 ms) before retrying
183 |             return self.current_bucket()
184 | 
185 |     def reset(self):
186 |         """ Reset all rolling **counters**
187 | 
188 |         Force a reset of all rolling **counters** (clear all **buckets**) so
189 |         that statistics start being gathered from scratch.
190 | 
191 |         This does NOT reset the :class:`CumulativeSum` values.
192 |         """
193 |         last_bucket = self.buckets.peek_last()
194 |         if last_bucket:
195 |             self.cumulative.add_bucket(last_bucket)
196 | 
197 |         self.buckets.clear()
198 | 
199 |     def rolling_sum(self, event):
200 |         """ Rolling sum
201 | 
202 |         Get the sum of all buckets in the rolling counter for the given
203 |         :class:`RollingNumberEvent`.
204 | 
205 |         The :class:`RollingNumberEvent` must be a **counter** type
206 | 
207 |         >>> RollingNumberEvent.isCounter()
208 |         True
209 | 
210 |         Args:
211 |             event (:class:`RollingNumberEvent`): Event defining which counter
212 |                 to retrieve values from.
213 | 
214 |         Returns:
215 |             long: Return value from the given :class:`RollingNumberEvent`
216 |                 counter type.
217 |         """
218 |         last_bucket = self.current_bucket()
219 |         if not last_bucket:
220 |             return 0
221 | 
222 |         sum = 0
223 |         for bucket in self.buckets:
224 |             sum += bucket.adder(event).sum()
225 |         return sum
226 | 
227 |     def rolling_max(self, event):
228 |         values = self.values(event)
229 |         if not values:
230 |             return 0
231 |         else:
232 |             return values[len(values) - 1]
233 | 
234 |     def values(self, event):
235 |         last_bucket = self.current_bucket()
236 |         if not last_bucket:
237 |             return 0
238 | 
239 |         values = []
240 |         for bucket in self.buckets:
241 |             if event.is_counter():
242 |                 values.append(bucket.adder(event).sum())
243 |             if event.is_max_updater():
244 |                 values.append(bucket.max_updater(event).max())
245 |         return values
246 | 
247 |     def value_of_latest_bucket(self, event):
248 |         last_bucket = self.current_bucket()
249 |         if not last_bucket:
250 |             return 0
251 | 
252 |         return last_bucket.get(event)
253 | 
254 |     def cumulative_sum(self, event):
255 |         """ Cumulative sum
256 | 
257 |         The cumulative sum of all buckets ever since the start without
258 |         rolling for the given :class:`RollingNumberEvent` type.
259 | 
260 |         See :meth:`rolling_sum` for the rolling sum.
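
        A minimal sketch (illustrative values; one success recorded)::

            >>> counter = RollingNumber(10000, 10)
            >>> counter.increment(RollingNumberEvent.SUCCESS)
            >>> counter.cumulative_sum(RollingNumberEvent.SUCCESS)
            1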
261 | 262 | The :class:`RollingNumberEvent` must be a **counter** type 263 | 264 | >>> RollingNumberEvent.isCounter() 265 | True 266 | 267 | Args: 268 | event (:class:`RollingNumberEvent`): Event defining which 269 | **counter** to increment. 270 | 271 | Returns: 272 | long: Returns the cumulative sum of all **increments** and 273 | **adds** for the given :class:`RollingNumberEvent` **counter** 274 | type. 275 | 276 | """ 277 | return self.value_of_latest_bucket(event) + self.cumulative.get(event) 278 | 279 | 280 | class BucketCircular(deque): 281 | ''' This is a circular array acting as a FIFO queue. ''' 282 | 283 | def __init__(self, size): 284 | super(BucketCircular, self).__init__(maxlen=size) 285 | 286 | @property 287 | def size(self): 288 | return len(self) 289 | 290 | def last(self): 291 | return self.peek_last() 292 | 293 | def peek_last(self): 294 | try: 295 | return self[0] 296 | except IndexError: 297 | return None 298 | 299 | def add_last(self, bucket): 300 | self.appendleft(bucket) 301 | 302 | 303 | class Bucket(object): 304 | """ Counters for a given `bucket` of time 305 | 306 | We support both :class:`LongAdder` and :class:`LongMaxUpdater` in a 307 | :class:`Bucket` but don't want the memory allocation of all types for each 308 | so we only allocate the objects if the :class:`RollingNumberEvent` matches 309 | the correct **type** - though we still have the allocation of empty arrays 310 | to the given length as we want to keep using the **type** value for fast 311 | random access. 312 | """ 313 | 314 | def __init__(self, start_time): 315 | self.window_start = start_time 316 | self._adder = {} 317 | self._max_updater = {} 318 | 319 | # TODO: Change this to use a metaclass 320 | for name, event in RollingNumberEvent.__members__.items(): 321 | if event.is_counter(): 322 | self._adder[event.name] = LongAdder() 323 | 324 | for name, event in RollingNumberEvent.__members__.items(): 325 | if event.is_max_updater(): 326 | self._max_updater[event.name] = LongMaxUpdater() 327 | 328 | def get(self, event): 329 | if event.is_counter(): 330 | return self.adder(event).sum() 331 | 332 | if event.is_max_updater(): 333 | return self.max_updater(event).max() 334 | 335 | raise Exception('Unknown type of event.') 336 | 337 | # TODO: Rename to add 338 | def adder(self, event): 339 | if event.is_counter(): 340 | return self._adder[event.name] 341 | 342 | raise Exception('Type is not a LongAdder.') 343 | 344 | # TODO: Rename to update_max 345 | def max_updater(self, event): 346 | if event.is_max_updater(): 347 | return self._max_updater[event.name] 348 | 349 | raise Exception('Type is not a LongMaxUpdater.') 350 | 351 | 352 | # TODO: Move this to hystrix/util/long_adder.py 353 | class LongAdder(object): 354 | 355 | def __init__(self, min_value=0): 356 | self._count = AtomicLong(value=min_value) 357 | 358 | def increment(self): 359 | self._count.add_and_get(1) 360 | 361 | def decrement(self): 362 | self._count.subtract_and_get(1) 363 | 364 | def sum(self): 365 | return self._count.get() 366 | 367 | def add(self, value): 368 | self._count.add_and_get(value) 369 | 370 | 371 | # TODO: Move this to hystrix/util/long_max_updater.py 372 | class LongMaxUpdater(object): 373 | 374 | def __init__(self, min_value=0): 375 | self._count = AtomicLong(value=min_value) 376 | 377 | def max(self): 378 | return self._count.get() 379 | 380 | def update(self, value): 381 | if value > self.max(): 382 | self._count.set(value) 383 | 384 | 385 | class CumulativeSum(object): 386 | 387 | def __init__(self): 388 | self._adder 
= {} 389 | self._max_updater = {} 390 | 391 | # TODO: Change this to use a metaclass 392 | for name, event in RollingNumberEvent.__members__.items(): 393 | if event.is_counter(): 394 | self._adder[event.name] = LongAdder() 395 | 396 | for name, event in RollingNumberEvent.__members__.items(): 397 | if event.is_max_updater(): 398 | self._max_updater[event.name] = LongMaxUpdater() 399 | 400 | def add_bucket(self, bucket): 401 | for name, event in RollingNumberEvent.__members__.items(): 402 | if event.is_counter(): 403 | self.adder(event).add(bucket.adder(event).sum()) 404 | 405 | if event.is_max_updater(): 406 | self.max_updater(event).update(bucket.max_updater(event).max()) 407 | 408 | def get(self, event): 409 | if event.is_counter(): 410 | return self.adder(event).sum() 411 | 412 | if event.is_max_updater(): 413 | return self.max_updater(event).max() 414 | 415 | raise Exception('Unknown type of event.') 416 | 417 | def adder(self, event): 418 | if event.is_counter(): 419 | return self._adder[event.name] 420 | 421 | raise Exception('Unknown type of event.') 422 | 423 | def max_updater(self, event): 424 | if event.is_max_updater(): 425 | return self._max_updater[event.name] 426 | 427 | raise Exception('Unknown type of event.') 428 | 429 | 430 | def _is_function(obj): 431 | return isinstance(obj, types.FunctionType) 432 | 433 | 434 | def _is_dunder(name): 435 | return (name[:2] == name[-2:] == '__' and 436 | name[2:3] != '_' and 437 | name[-3:-2] != '_' and 438 | len(name) > 4) 439 | 440 | 441 | class Event(object): 442 | 443 | def __init__(self, name, value): 444 | self._name = name 445 | self._value = value 446 | 447 | def is_counter(self): 448 | return self._value == 1 449 | 450 | def is_max_updater(self): 451 | return self._value == 2 452 | 453 | @property 454 | def name(self): 455 | return self._name 456 | 457 | @property 458 | def value(self): 459 | return self._value 460 | 461 | 462 | class EventMetaclass(type): 463 | 464 | def __new__(cls, name, bases, attrs): 465 | __members = {} 466 | 467 | for name, value in attrs.items(): 468 | if not _is_dunder(name) and not _is_function(value): 469 | __members[name] = Event(name, value) 470 | 471 | for name, value in __members.items(): 472 | attrs[name] = __members[name] 473 | 474 | new_class = super(EventMetaclass, cls).__new__(cls, name, 475 | bases, attrs) 476 | 477 | setattr(new_class, '__members__', __members) 478 | 479 | return new_class 480 | 481 | 482 | # TODO: Move this to hystrix/util/rolling_number_event.py 483 | class RollingNumberEvent(six.with_metaclass(EventMetaclass, object)): 484 | """ Various states/events that can be captured in the 485 | :class:`RollingNumber`. 486 | 487 | Note that events are defined as different types: 488 | 489 | >>> self.is_counter() == True 490 | True 491 | 492 | >>> self.is_max_updater() == True 493 | True 494 | 495 | The **counter** type events can be used with 496 | :meth:`RollingNumber.increment`, :meth:`RollingNumber.add`, 497 | :meth:`RollingNumber.rolling_sum` and others. 498 | 499 | The **max updater** type events can be used with 500 | :meth:`RollingNumber.update_rolling_max` and 501 | :meth:`RollingNumber.rolling_max_value`. 
502 | """ 503 | 504 | SUCCESS = 1 505 | FAILURE = 1 506 | TIMEOUT = 1 507 | SHORT_CIRCUITED = 1 508 | THREAD_POOL_REJECTED = 1 509 | SEMAPHORE_REJECTED = 1 510 | BAD_REQUEST = 1 511 | FALLBACK_SUCCESS = 1 512 | FALLBACK_FAILURE = 1 513 | FALLBACK_REJECTION = 1 514 | EXCEPTION_THROWN = 1 515 | THREAD_EXECUTION = 1 516 | THREAD_MAX_ACTIVE = 2 517 | COLLAPSED = 1 518 | RESPONSE_FROM_CACHE = 1 519 | 520 | def __init__(self, event): 521 | self._event = event 522 | 523 | def is_counter(self): 524 | """ Is counter 525 | 526 | Returns: 527 | bool: Returns ``True`` if the event type is **counter**, otherwise 528 | it returns ``False``. 529 | """ 530 | return self._event.value == 1 531 | 532 | def is_max_updater(self): 533 | """ Is max updater 534 | 535 | Returns: 536 | bool: Returns ``True`` if the event type is **max updater**, otherwise 537 | it returns ``False``. 538 | """ 539 | return self._event.value == 2 540 | 541 | 542 | class ActualTime(object): 543 | """ Actual time 544 | """ 545 | 546 | def current_time_in_millis(self): 547 | """ Current time in milliseconds 548 | 549 | Returns: 550 | int: Returns :func:`time.time()` converted to milliseconds 551 | """ 552 | return int(round(time.time() * 1000)) 553 | -------------------------------------------------------------------------------- /hystrix/rolling_percentile.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from multiprocessing import RLock, Array 3 | import itertools 4 | import logging 5 | import time 6 | import math 7 | 8 | from hystrix.rolling_number import BucketCircular 9 | 10 | 11 | log = logging.getLogger(__name__) 12 | 13 | 14 | class RollingPercentile(object): 15 | 16 | def __init__(self, _time, milliseconds, bucket_numbers, 17 | bucket_data_length, enabled): 18 | self.time = _time 19 | self.milliseconds = milliseconds 20 | self.buckets = BucketCircular(bucket_numbers) 21 | self.bucket_numbers = bucket_numbers 22 | self.bucket_data_length = bucket_data_length 23 | self.enabled = enabled 24 | self.snapshot = PercentileSnapshot(0) 25 | self._new_bucket_lock = RLock() 26 | 27 | def buckets_size_in_milliseconds(self): 28 | return self.milliseconds / self.bucket_numbers 29 | 30 | def current_bucket(self): 31 | current_time = self.time.current_time_in_millis() 32 | current_bucket = self.buckets.peek_last() 33 | 34 | if current_bucket is not None and current_time < (current_bucket.window_start + self.buckets_size_in_milliseconds()): 35 | return current_bucket 36 | 37 | with self._new_bucket_lock: 38 | # If we didn't find the current bucket above, then we have to 39 | # create one.
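            # Road map of the branches below: if no bucket exists yet, the very
            # first one is created; while the newest bucket is stale, empty
            # buckets are appended one window at a time and the percentile
            # snapshot is rebuilt from the buckets that were already present;
            # if the whole rolling window has lapsed, everything is reset and
            # the lookup starts over.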
40 | if self.buckets.peek_last() is None: 41 | new_bucket = Bucket(current_time, self.bucket_data_length) 42 | self.buckets.add_last(new_bucket) 43 | return new_bucket 44 | else: 45 | for i in range(self.bucket_numbers): 46 | last_bucket = self.buckets.peek_last() 47 | if current_time < (last_bucket.window_start + self.buckets_size_in_milliseconds()): 48 | return last_bucket 49 | elif current_time - (last_bucket.window_start + self.buckets_size_in_milliseconds()) > self.milliseconds: 50 | self.reset() 51 | return self.current_bucket() 52 | else: 53 | all_buckets = [b for b in self.buckets] 54 | self.buckets.add_last(Bucket(last_bucket.window_start + self.buckets_size_in_milliseconds(), self.bucket_data_length)) 55 | self.snapshot = PercentileSnapshot(*all_buckets) 56 | 57 | return self.buckets.peek_last() 58 | 59 | # we didn't get the lock so just return the latest bucket while 60 | # another thread creates the next one 61 | current_bucket = self.buckets.peek_last() 62 | if current_bucket is not None: 63 | return current_bucket 64 | else: 65 | # In the rare scenario where multiple threads raced to create the 66 | # very first bucket, wait briefly and then recurse while the 67 | # other thread finishes creating it 68 | time.sleep(0.005) 69 | return self.current_bucket() 70 | 71 | def add_value(self, *values): 72 | ''' Add value (or values) to current bucket. 73 | ''' 74 | 75 | if not self.enabled: 76 | return 77 | 78 | for value in values: 79 | self.current_bucket().data.add_value(value) 80 | 81 | def percentile(self, percentile): 82 | if not self.enabled: 83 | return -1 84 | 85 | # Force logic to move buckets forward in case other requests aren't 86 | # making it happen 87 | self.current_bucket() 88 | 89 | # Fetch the current snapshot 90 | return self.current_percentile_snapshot().percentile(percentile) 91 | 92 | def current_percentile_snapshot(self): 93 | return self.snapshot 94 | 95 | def mean(self): 96 | if not self.enabled: 97 | return -1 98 | 99 | # Force logic to move buckets forward in case other requests aren't 100 | # making it happen 101 | self.current_bucket() 102 | 103 | # Fetch the current snapshot 104 | return self.current_percentile_snapshot().mean() 105 | 106 | 107 | class Bucket(object): 108 | ''' Counters for a given 'bucket' of time.
''' 109 | 110 | def __init__(self, start_time, bucket_data_length): 111 | self.window_start = start_time 112 | self.data = PercentileBucketData(bucket_data_length) 113 | 114 | 115 | class PercentileBucketData(object): 116 | 117 | def __init__(self, data_length): 118 | self.data_length = data_length 119 | self.list = Array('i', self.data_length, lock=RLock()) 120 | # TODO: Change this to use a generator 121 | self.index = itertools.count() 122 | self.number = 0 123 | 124 | def add_value(self, *latencies): 125 | # We just wrap around the beginning and over-write if we go past 126 | # 'data_length' as that will effectively cause us to "sample" the 127 | # most recent data 128 | for latency in latencies: 129 | self.number = next(self.index) 130 | self.list[self.number % self.data_length] = latency 131 | self.number = self.number + 1 132 | 133 | def length(self): 134 | if self.number > len(self.list): 135 | return len(self.list) 136 | else: 137 | return self.number 138 | 139 | 140 | class PercentileSnapshot(object): 141 | 142 | def __init__(self, *args): 143 | self.data = Array('i', 0, lock=RLock()) 144 | self._mean = 0 145 | self.length = 0 146 | 147 | if isinstance(args[0], int): 148 | self.data = list(args) 149 | self.length = len(args) 150 | self.buckets = [] 151 | 152 | _sum = 0 153 | for d in self.data: 154 | _sum += d 155 | 156 | self._mean = _sum / self.length 157 | self.data = Array('i', sorted(sorted(self.data), key=bool, 158 | reverse=True), lock=RLock()) 159 | 160 | elif isinstance(args[0], Bucket): 161 | self.length_from_buckets = 0 162 | self.buckets = args 163 | for bucket in self.buckets: 164 | self.length_from_buckets += bucket.data.data_length 165 | 166 | self.data = Array('i', self.length_from_buckets, lock=RLock()) 167 | _sum = 0 168 | index = 0 169 | for bucket in self.buckets: 170 | pbd = bucket.data 171 | length = pbd.length() 172 | for i in range(length): 173 | v = pbd.list[i] 174 | self.data[index] = v 175 | index += 1 176 | _sum += v 177 | 178 | self.length = index 179 | if self.length == 0: 180 | self._mean = 0 181 | else: 182 | self._mean = _sum / self.length 183 | 184 | self.data = Array('i', sorted(sorted(self.data), key=bool, 185 | reverse=True), lock=RLock()) 186 | 187 | def percentile(self, percentile): 188 | if self.length == 0: 189 | return 0 190 | 191 | return self.compute_percentile(percentile) 192 | 193 | def compute_percentile(self, percent): 194 | if self.length <= 0: 195 | return 0 196 | elif percent <= 0.0: 197 | return self.data[0] 198 | elif percent >= 100.0: 199 | return self.data[self.length - 1] 200 | 201 | rank = (percent / 100.0) * self.length 202 | 203 | # Linear interpolation between closest ranks 204 | ilow = int(math.floor(rank)) 205 | ihigh = int(math.ceil(rank)) 206 | 207 | assert 0 <= ilow and ilow <= rank and rank <= ihigh and ihigh <= self.length 208 | assert (ihigh - ilow) <= 1 209 | 210 | if ihigh >= self.length: 211 | # Another edge case 212 | return self.data[self.length - 1] 213 | elif ilow == ihigh: 214 | return self.data[ilow] 215 | else: 216 | # Interpolate between the two bounding values 217 | return int(self.data[ilow] + (rank - ilow) * (self.data[ihigh] - self.data[ilow])) 218 | 219 | def mean(self): 220 | return int(self._mean) 221 | -------------------------------------------------------------------------------- /hystrix/strategy/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/wiliamsouza/hystrix-py/9876b39980bc8dcb334fcb0ee8c15d6949112203/hystrix/strategy/__init__.py -------------------------------------------------------------------------------- /hystrix/strategy/eventnotifier/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wiliamsouza/hystrix-py/9876b39980bc8dcb334fcb0ee8c15d6949112203/hystrix/strategy/eventnotifier/__init__.py -------------------------------------------------------------------------------- /hystrix/strategy/eventnotifier/event_notifier.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import logging 3 | 4 | log = logging.getLogger(__name__) 5 | 6 | 7 | class AbstractBaseEventNotifier(object): 8 | """ Abstract base EventNotifier that allows receiving notifications for 9 | different events with default implementations. 10 | 11 | See :class:`hystrix.strategy.plugins.Plugin` or the Hystrix GitHub Wiki 12 | for information on `configuring plugins 13 | `_. 14 | 15 | .. note:: 16 | Note on thread-safety and performance 17 | 18 | A single implementation of this class will be used globally, so its 19 | methods will be invoked concurrently from multiple threads and all 20 | functionality must be thread-safe. 21 | 22 | Methods are also invoked synchronously and will add to the execution 23 | time of the commands, so all behavior should be fast. If anything 24 | time-consuming is to be done it should be spawned asynchronously 25 | onto separate worker threads. 26 | """ 27 | 28 | def mark_event(self, event_type, command_name): 29 | """ Called for every event fired. 30 | 31 | This is the default implementation and does nothing. 32 | 33 | Args: 34 | event_type: A :class:`hystrix.event_type.EventType` that occurred 35 | during execution. 36 | command_name: Command instance name. 37 | """ 38 | 39 | # Do nothing 40 | pass 41 | 42 | def mark_command_execution(self, command_name, isolation_strategy, duration, events_type): 43 | """ Called after a command is executed using thread isolation. 44 | 45 | Will not get called if a command is rejected, short-circuited etc. 46 | 47 | This is the default implementation and does nothing. 48 | 49 | Args: 50 | command_name: Command instance name. 51 | isolation_strategy: :class:`ExecutionIsolationStrategy` the 52 | isolation strategy used by the command when executed. 53 | duration: Time in milliseconds of executing the 54 | :meth:`hystrix.command.Command.run()` method. 55 | events_type: A list of :class:`hystrix.event_type.EventType` events 56 | that occurred during execution. 57 | """ 58 | 59 | # Do nothing 60 | pass 61 | -------------------------------------------------------------------------------- /hystrix/strategy/eventnotifier/event_notifier_default.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | import logging 3 | 4 | from hystrix.strategy.eventnotifier.event_notifier import AbstractBaseEventNotifier 5 | 6 | log = logging.getLogger(__name__) 7 | 8 | 9 | class EventNotifierDefault(AbstractBaseEventNotifier): 10 | """ 11 | Default implementation of :class:`AbstractBaseEventNotifier` that does nothing.
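
    It is normally used as a process-wide singleton; for example (the
    ``event_type`` and ``command_name`` values below are illustrative
    placeholders)::

        notifier = EventNotifierDefault.get_instance()
        notifier.mark_event(event_type, command_name)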
12 | """ 13 | 14 | INSTANCE = None 15 | 16 | @classmethod 17 | def get_instance(klass): 18 | if not klass.INSTANCE: 19 | klass.INSTANCE = klass() 20 | return klass.INSTANCE 21 | -------------------------------------------------------------------------------- /repos.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | git remote add github git@github.com:wiliamsouza/hystrix-py.git 4 | git remote add bitbucket git@bitbucket.org:wiliamsouza/hystrix-py.git 5 | git remote add gitlab git@gitlab.com:wiliamsouza/hystrix-py.git 6 | git remote set-url --push --add origin git@bitbucket.org:wiliamsouza/hystrix-py.git 7 | git remote set-url --push --add origin git@gitlab.com:wiliamsouza/hystrix-py.git 8 | git remote set-url --push --add origin git@github.com:wiliamsouza/hystrix-py.git 9 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [bdist_wheel] 2 | # This flag says that the code is written to work on both Python 2 and Python 3 | # 3. If at all possible, it is good practice to do this. If you cannot, you 4 | # will need to generate wheels for each Python version that you support. 5 | universal=1 6 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | import re 3 | import sys 4 | import codecs 5 | 6 | from setuptools import setup, find_packages, Command 7 | from setuptools.command.test import test as TestCommand 8 | 9 | 10 | here = os.path.abspath(os.path.dirname(__file__)) 11 | 12 | setup_requires = ['pytest', 'tox'] 13 | install_requires = ['six', 'tox', 'atomos'] 14 | tests_require = ['six', 'pytest-cov', 'pytest-cache', 'pytest-timeout'] 15 | dev_requires = ['pyflakes', 'pep8', 'pylint', 'check-manifest', 16 | 'ipython', 'ipdb', 'sphinx', 'sphinx_rtd_theme', 17 | 'sphinxcontrib-napoleon'] 18 | dev_requires.extend(tests_require) 19 | 20 | PY2 = sys.version_info.major == 2 21 | PY3 = sys.version_info.major == 3 22 | 23 | if PY2: 24 | install_requires.append('futures') 25 | install_requires.append('enum34') 26 | 27 | if PY3: 28 | install_requires.append('enum34') 29 | 30 | version = "0.0.0" 31 | changes = os.path.join(here, "CHANGES.md") 32 | pattern = r'^#*\s*(?P<version>[0-9]+\.[0-9]+(\.[0-9]+)?)$' 33 | with codecs.open(changes, encoding='utf-8') as changes: 34 | for line in changes: 35 | match = re.match(pattern, line) 36 | if match: 37 | version = match.group("version") 38 | break 39 | 40 | # Get the long description 41 | with codecs.open(os.path.join(here, 'README.md'), encoding='utf-8') as f: 42 | long_description = f.read() 43 | 44 | # Get the changelog 45 | with codecs.open(os.path.join(here, 'CHANGES.md'), encoding='utf-8') as f: 46 | changelog = f.read() 47 | 48 | 49 | class VersionCommand(Command): 50 | description = "print library version" 51 | user_options = [] 52 | 53 | def initialize_options(self): 54 | pass 55 | 56 | def finalize_options(self): 57 | pass 58 | 59 | def run(self): 60 | print(version) 61 | 62 | 63 | class PyTest(TestCommand): 64 | def finalize_options(self): 65 | TestCommand.finalize_options(self) 66 | self.test_args = ['--strict', '--verbose', '--tb=long', 67 | '--cov', 'hystrix', '--cov-report', 68 | 'term-missing', 'tests'] 69 | self.test_suite = True 70 | 71 | def run_tests(self): 72 | import pytest 73 | errno = pytest.main(self.test_args) 74 | sys.exit(errno) 75 |
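# These command classes are wired up through the cmdclass mapping in the
# setup() call below: `python setup.py test` runs the pytest configuration
# above, and `python setup.py tox` delegates to the Tox command defined next.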
76 | 77 | class Tox(TestCommand): 78 | user_options = [('tox-args=', 'a', "Arguments to pass to tox")] 79 | 80 | def initialize_options(self): 81 | TestCommand.initialize_options(self) 82 | self.tox_args = None 83 | 84 | def finalize_options(self): 85 | TestCommand.finalize_options(self) 86 | self.test_args = [] 87 | self.test_suite = True 88 | 89 | def run_tests(self): 90 | # import here, cause outside the eggs aren't loaded 91 | import tox 92 | import shlex 93 | args = self.tox_args 94 | if args: 95 | args = shlex.split(self.tox_args) 96 | errno = tox.cmdline(args=args) 97 | sys.exit(errno) 98 | 99 | 100 | setup( 101 | name='hystrix-py', 102 | version='0.1.0', 103 | description='A Netflix Hystrix implementation in Python', 104 | long_description=long_description, 105 | url='https://github.com/wiliamsouza/hystrix-py', 106 | author='The Hystrix Python Authors', 107 | author_email='wiliamsouza83@gmail.com', 108 | license='Apache Software License 2.0', 109 | classifiers=[ 110 | 'Development Status :: 3 - Alpha', 111 | 'Intended Audience :: Developers', 112 | 'Topic :: Software Development :: Library', 113 | 'License :: OSI Approved :: Apache Software License 2.0', 114 | 'Programming Language :: Python :: 2', 115 | 'Programming Language :: Python :: 2.7', 116 | 'Programming Language :: Python :: 3', 117 | 'Programming Language :: Python :: 3.3', 118 | 'Programming Language :: Python :: 3.4', 119 | ], 120 | keywords='sample setuptools development', 121 | packages=find_packages(exclude=['docs', 'tests']), 122 | setup_requires=setup_requires, 123 | install_requires=install_requires, 124 | tests_require=tests_require, 125 | extras_require={ 126 | 'dev': dev_requires, 127 | 'test': tests_require, 128 | }, 129 | cmdclass={ 130 | "version": VersionCommand, 131 | 'test': PyTest, 132 | "tox": Tox, 133 | }, 134 | ) 135 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/wiliamsouza/hystrix-py/9876b39980bc8dcb334fcb0ee8c15d6949112203/tests/__init__.py -------------------------------------------------------------------------------- /tests/test_circuitbreaker.py: -------------------------------------------------------------------------------- 1 | from hystrix.circuitbreaker import CircuitBreaker 2 | 3 | 4 | def test_default_circuitbreakername(): 5 | class Test(CircuitBreaker): 6 | pass 7 | 8 | circuitbreaker = Test() 9 | assert circuitbreaker.circuit_breaker_name == 'TestCircuitBreaker' 10 | 11 | 12 | def test_manual_circuitbreakername(): 13 | class Test(CircuitBreaker): 14 | __circuit_breaker_name__ = 'MyTestCircuitBreaker' 15 | pass 16 | 17 | circuitbreaker = Test() 18 | assert circuitbreaker.circuit_breaker_name == 'MyTestCircuitBreaker' 19 | -------------------------------------------------------------------------------- /tests/test_command.py: -------------------------------------------------------------------------------- 1 | from hystrix.command import Command 2 | 3 | import pytest 4 | 5 | 6 | class HelloCommand(Command): 7 | def run(self): 8 | return 'Hello Run' 9 | 10 | 11 | class FallbackCommand(Command): 12 | def run(self): 13 | raise RuntimeError('This command always fails') 14 | 15 | def fallback(self): 16 | return 'Hello Fallback' 17 | 18 | 19 | class CacheCommand(Command): 20 | def run(self): 21 | raise RuntimeError('This command always fails') 22 | 23 | def fallback(self): 24 | raise RuntimeError('This command always fails') 
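    # Both run() and fallback() fail on purpose here, so the cache tests
    # further down exercise cache() as the last step of the fallback chain
    # (execute(), queue() and observe() all end up returning 'Hello Cache').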
25 | 26 | def cache(self): 27 | return 'Hello Cache' 28 | 29 | 30 | def test_not_implemented_error(): 31 | class NotImplementedCommand(Command): 32 | pass 33 | 34 | command = NotImplementedCommand() 35 | 36 | with pytest.raises(RuntimeError): 37 | command.run() 38 | 39 | with pytest.raises(RuntimeError): 40 | command.fallback() 41 | 42 | with pytest.raises(RuntimeError): 43 | command.cache() 44 | 45 | 46 | def test_default_groupname(): 47 | class RunCommand(Command): 48 | pass 49 | 50 | command = RunCommand() 51 | assert command.group_key == 'RunCommandGroup' 52 | 53 | 54 | def test_manual_groupname(): 55 | class RunCommand(Command): 56 | group_key = 'MyRunGroup' 57 | pass 58 | 59 | command = RunCommand() 60 | assert command.group_key == 'MyRunGroup' 61 | 62 | 63 | def test_command_hello_synchronous(): 64 | command = HelloCommand() 65 | result = command.execute() 66 | assert 'Hello Run' == result 67 | 68 | 69 | def test_command_hello_asynchronous(): 70 | command = HelloCommand() 71 | future = command.queue() 72 | assert 'Hello Run' == future.result() 73 | 74 | 75 | def test_command_hello_callback(): 76 | command = HelloCommand() 77 | future = command.observe() 78 | assert 'Hello Run' == future.result() 79 | 80 | 81 | def test_command_hello_fallback_synchronous(): 82 | command = FallbackCommand() 83 | result = command.execute() 84 | assert 'Hello Fallback' == result 85 | 86 | 87 | def test_command_hello_fallback_asynchronous(): 88 | command = FallbackCommand() 89 | future = command.queue() 90 | assert 'Hello Fallback' == future.result() 91 | 92 | 93 | def test_command_hello_fallback_callback(): 94 | command = FallbackCommand() 95 | future = command.observe() 96 | assert 'Hello Fallback' == future.result() 97 | 98 | 99 | def test_command_hello_cache_synchronous(): 100 | command = CacheCommand() 101 | result = command.execute() 102 | assert 'Hello Cache' == result 103 | 104 | 105 | def test_command_hello_cache_asynchronous(): 106 | command = CacheCommand() 107 | future = command.queue() 108 | assert 'Hello Cache' == future.result() 109 | 110 | 111 | def test_command_hello_cache_callback(): 112 | command = CacheCommand() 113 | future = command.observe() 114 | assert 'Hello Cache' == future.result() 115 | -------------------------------------------------------------------------------- /tests/test_command_metrics.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | from hystrix.command import Command 4 | from hystrix.command_metrics import CommandMetrics 5 | from hystrix.command_properties import CommandProperties 6 | from hystrix.strategy.eventnotifier.event_notifier_default import ( 7 | EventNotifierDefault) 8 | 9 | from .test_command_properties import get_unit_test_properties_setter, as_mock 10 | 11 | setter = CommandProperties.setter() 12 | properties = CommandProperties('TEST', setter, 'unit_test_prefix') 13 | event_notifier = EventNotifierDefault.get_instance() 14 | 15 | 16 | def test_default_command_metrics_key(): 17 | class Test(CommandMetrics): 18 | pass 19 | 20 | commandmetrics = Test(None, 'command_group', None, properties, 21 | event_notifier) 22 | assert commandmetrics.command_metrics_key == 'TestCommandMetrics' 23 | 24 | 25 | def test_manual_command_metrics_key(): 26 | class Test(CommandMetrics): 27 | command_metrics_key = 'MyTestCommandMetrics' 28 | pass 29 | 30 | commandmetrics = Test(None, 'command_group', None, properties, 31 | event_notifier) 32 | assert commandmetrics.command_metrics_key == 'MyTestCommandMetrics' 33 | 34 
| 35 | def test_error_percentage(): 36 | properties = get_unit_test_properties_setter() 37 | metrics = get_metrics(properties) 38 | 39 | metrics.mark_success(100) 40 | assert 0 == metrics.health_counts().error_percentage() 41 | 42 | metrics.mark_failure(1000) 43 | assert 50 == metrics.health_counts().error_percentage() 44 | 45 | metrics.mark_success(100) 46 | metrics.mark_success(100) 47 | assert 25 == metrics.health_counts().error_percentage() 48 | 49 | metrics.mark_timeout(5000) 50 | metrics.mark_timeout(5000) 51 | assert 50 == metrics.health_counts().error_percentage() 52 | 53 | metrics.mark_success(100) 54 | metrics.mark_success(100) 55 | metrics.mark_success(100) 56 | 57 | # latent 58 | metrics.mark_success(5000) 59 | 60 | # 6 success + 1 latent success + 1 failure + 2 timeout = 10 total 61 | # latent success not considered error 62 | # error percentage = 1 failure + 2 timeout / 10 63 | assert 30 == metrics.health_counts().error_percentage() 64 | 65 | 66 | def test_bad_request_do_not_affect_error_percentage(): 67 | properties = get_unit_test_properties_setter() 68 | metrics = get_metrics(properties) 69 | 70 | metrics.mark_success(100) 71 | assert 0 == metrics.health_counts().error_percentage() 72 | 73 | metrics.mark_failure(1000) 74 | assert 50 == metrics.health_counts().error_percentage() 75 | 76 | metrics.mark_bad_request(1) 77 | metrics.mark_bad_request(2) 78 | assert 50 == metrics.health_counts().error_percentage() 79 | 80 | metrics.mark_failure(45) 81 | metrics.mark_failure(55) 82 | assert 75 == metrics.health_counts().error_percentage() 83 | 84 | 85 | """ 86 | def test_current_concurrent_exection_count(): 87 | class LatentCommand(Command): 88 | def __init__(self, duration): 89 | super().__init__(timeout=1000) 90 | self.duration = duration 91 | 92 | def run(self): 93 | time.sleep(self.duration) 94 | return True 95 | 96 | def fallback(self): 97 | return False 98 | 99 | metrics = None 100 | for _ in range(7): 101 | cmd = LatentCommand(400) 102 | if metrics is None: 103 | metrics = cmd.metrics 104 | cmd.queue() 105 | 106 | assert 8 == metrics.current_concurrent_execution_count() 107 | """ 108 | 109 | 110 | # TODO: Move this to utils.py file 111 | # Utility method for creating :class:`hystrix.command_metrics.CommandMetrics` 112 | # for unit tests. 113 | def get_metrics(setter): 114 | return CommandMetrics('command_test', 'command_test', None, 115 | as_mock(setter), EventNotifierDefault.get_instance()) 116 | -------------------------------------------------------------------------------- /tests/test_command_properties.py: -------------------------------------------------------------------------------- 1 | from hystrix.command_properties import CommandProperties 2 | 3 | 4 | # TODO: Move this to utils.py file 5 | # Utility method for creating baseline properties for unit tests. 
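# The values below (request volume threshold of 0, a very long circuit-breaker
# sleep window, a 5000ms statistical window split into 5 buckets) appear to be
# chosen so that circuit-breaker and metrics behaviour stays deterministic for
# the assertions in these tests.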
6 | def get_unit_test_properties_setter(): 7 | return CommandProperties.setter() \ 8 | .with_execution_timeout_in_milliseconds(1000) \ 9 | .with_execution_isolation_strategy(0) \ 10 | .with_execution_isolation_thread_interrupt_on_timeout(True) \ 11 | .with_circuit_breaker_force_open(False) \ 12 | .with_circuit_breaker_error_threshold_percentage(40) \ 13 | .with_metrics_rolling_statistical_window_in_milliseconds(5000) \ 14 | .with_metrics_rolling_statistical_window_buckets(5) \ 15 | .with_circuit_breaker_request_volume_threshold(0) \ 16 | .with_circuit_breaker_sleep_window_in_milliseconds(5000000) \ 17 | .with_circuit_breaker_enabled(True) \ 18 | .with_request_log_enabled(True) \ 19 | .with_execution_isolation_semaphore_max_concurrent_requests(20) \ 20 | .with_fallback_isolation_semaphore_max_concurrent_requests(10) \ 21 | .with_fallback_enabled(True) \ 22 | .with_circuit_breaker_force_closed(False) \ 23 | .with_metrics_rolling_percentile_enabled(True) \ 24 | .with_request_cache_enabled(True) \ 25 | .with_metrics_rolling_percentile_window_in_milliseconds(60000) \ 26 | .with_metrics_rolling_percentile_window_buckets(12) \ 27 | .with_metrics_rolling_percentile_bucket_size(1000) \ 28 | .with_metrics_health_snapshot_interval_in_milliseconds(0) 29 | 30 | 31 | # TODO: Move this to utils.py file 32 | # Return a static representation of the properties with values from the Builder 33 | # so that UnitTests can create properties that are not affected by the actual 34 | # implementations which pick up their values dynamically. 35 | # NOTE: This only work because in CommandProperties the setter override should 36 | # take precedence over default_value 37 | def as_mock(setter): 38 | return CommandProperties('TEST', setter, 'unit_test_prefix') 39 | 40 | 41 | class PropertiesCommandTest(CommandProperties): 42 | 43 | def __init__(self, command_name, setter, property_prefix): 44 | super(PropertiesCommandTest, self).__init__(command_name, setter, property_prefix) 45 | 46 | 47 | def test_boolean_setter_override1(): 48 | setter = CommandProperties.setter().with_circuit_breaker_force_closed(True) 49 | properties = PropertiesCommandTest('TEST', setter, 'unitTestPrefix') 50 | 51 | # The setter override should take precedence over default_value 52 | assert True == properties.circuit_breaker_force_closed() 53 | 54 | 55 | def test_boolean_setter_override2(): 56 | setter = CommandProperties.setter().with_circuit_breaker_force_closed(False) 57 | properties = PropertiesCommandTest('TEST', setter, 'unitTestPrefix') 58 | 59 | # The setter override should take precedence over default 60 | assert False == properties.circuit_breaker_force_closed() 61 | 62 | 63 | def test_boolean_code_default(): 64 | setter = CommandProperties.setter() 65 | properties = PropertiesCommandTest('TEST', setter, 'unitTestPrefix') 66 | 67 | assert CommandProperties.default_circuit_breaker_force_closed == properties.circuit_breaker_force_closed() 68 | 69 | 70 | def test_integer_setter_override(): 71 | setter = CommandProperties.setter().with_metrics_rolling_statistical_window_in_milliseconds(5000) 72 | properties = PropertiesCommandTest('TEST', setter, 'unitTestPrefix') 73 | 74 | # The setter override should take precedence over default_value 75 | assert 5000 == properties.metrics_rolling_statistical_window_in_milliseconds() 76 | 77 | 78 | def test_integer_code_default(): 79 | setter = CommandProperties.setter() 80 | properties = PropertiesCommandTest('TEST', setter, 'unitTestPrefix') 81 | 82 | result1 = 
CommandProperties.default_metrics_rolling_statistical_window 83 | result2 = properties.metrics_rolling_statistical_window_in_milliseconds() 84 | assert result1 == result2 85 | -------------------------------------------------------------------------------- /tests/test_group.py: -------------------------------------------------------------------------------- 1 | from hystrix.group import Group 2 | 3 | 4 | def test_default_groupname(): 5 | class Test(Group): 6 | pass 7 | 8 | group = Test() 9 | assert group.group_key == 'TestGroup' 10 | 11 | 12 | def test_manual_groupname(): 13 | class Test(Group): 14 | group_key = 'MyTestGroup' 15 | pass 16 | 17 | group = Test() 18 | assert group.group_key == 'MyTestGroup' 19 | -------------------------------------------------------------------------------- /tests/test_metrics.py: -------------------------------------------------------------------------------- 1 | from hystrix.metrics import Metrics 2 | from hystrix.command_properties import CommandProperties 3 | from hystrix.rolling_number import RollingNumber, RollingNumberEvent 4 | 5 | setter = CommandProperties.setter() 6 | properties = CommandProperties('TEST', setter, 'unit_test_prefix') 7 | counter = RollingNumber(properties.metrics_rolling_statistical_window_in_milliseconds(), 8 | properties.metrics_rolling_statistical_window_buckets()) 9 | 10 | 11 | def test_metrics_cumulative_count(): 12 | metrics = Metrics(counter) 13 | assert metrics.cumulative_count(RollingNumberEvent.THREAD_MAX_ACTIVE) == 0 14 | 15 | 16 | def test_metrics_rolling_count(): 17 | metrics = Metrics(counter) 18 | assert metrics.rolling_count(RollingNumberEvent.SUCCESS) == 0 19 | -------------------------------------------------------------------------------- /tests/test_pool.py: -------------------------------------------------------------------------------- 1 | from hystrix.pool import Pool 2 | 3 | 4 | def test_default_poolname(): 5 | class Test(Pool): 6 | pass 7 | 8 | pool = Test() 9 | assert pool.pool_key == 'TestPool' 10 | 11 | 12 | def test_manual_poolname(): 13 | class Test(Pool): 14 | pool_key = 'MyTestPool' 15 | pass 16 | 17 | pool = Test() 18 | assert pool.pool_key == 'MyTestPool' 19 | -------------------------------------------------------------------------------- /tests/test_pool_metrics.py: -------------------------------------------------------------------------------- 1 | from hystrix.pool_metrics import PoolMetrics 2 | 3 | 4 | def test_default_pool_metrics_name(): 5 | class Test(PoolMetrics): 6 | pass 7 | 8 | poolmetrics = Test() 9 | assert poolmetrics.pool_metrics_key == 'TestPoolMetrics' 10 | 11 | 12 | def test_manual_pool_metrics_name(): 13 | class Test(PoolMetrics): 14 | pool_metrics_key = 'MyTestPoolMetrics' 15 | pass 16 | 17 | poolmetrics = Test() 18 | assert poolmetrics.pool_metrics_key == 'MyTestPoolMetrics' 19 | -------------------------------------------------------------------------------- /tests/test_rolling_number.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from .utils import MockedTime 4 | 5 | from hystrix.rolling_number import RollingNumber, RollingNumberEvent 6 | 7 | 8 | def test_create_buckets(): 9 | _time = MockedTime() 10 | counter = RollingNumber(200, 10, _time=_time) 11 | 12 | # confirm the initial settings 13 | assert counter.milliseconds == 200 14 | assert counter.bucket_numbers == 10 15 | assert counter.buckets_size_in_milliseconds() == 20 16 | 17 | # We start out with 0 buckets in the queue 18 | assert counter.buckets.size == 0 
19 | 20 | # add a success in each interval which should result in all 10 buckets 21 | # being created with 1 success in each 22 | for r in range(counter.bucket_numbers): 23 | counter.increment(RollingNumberEvent.SUCCESS) 24 | _time.increment(counter.buckets_size_in_milliseconds()) 25 | 26 | # confirm we have all 10 buckets 27 | assert counter.buckets.size == 10 28 | 29 | # add 1 more and we should still only have 10 buckets since that's the max 30 | counter.increment(RollingNumberEvent.SUCCESS) 31 | assert counter.buckets.size == 10 32 | 33 | 34 | def test_reset_buckets(): 35 | _time = MockedTime() 36 | counter = RollingNumber(200, 10, _time=_time) 37 | 38 | # We start out with 0 buckets in the queue 39 | assert counter.buckets.size == 0 40 | 41 | # Add 1 42 | counter.increment(RollingNumberEvent.SUCCESS) 43 | 44 | # Confirm we have 1 bucket 45 | assert counter.buckets.size == 1 46 | 47 | # Confirm we still have 1 bucket 48 | assert counter.buckets.size == 1 49 | 50 | # Add 1 51 | counter.increment(RollingNumberEvent.SUCCESS) 52 | 53 | # We should now have a single bucket with no values in it instead of 2 or 54 | # more buckets 55 | assert counter.buckets.size == 1 56 | 57 | 58 | def test_empty_buckets_fill_in(): 59 | _time = MockedTime() 60 | counter = RollingNumber(200, 10, _time=_time) 61 | 62 | # We start out with 0 buckets in the queue 63 | assert counter.buckets.size == 0 64 | 65 | # Add 1 66 | counter.increment(RollingNumberEvent.SUCCESS) 67 | 68 | # Confirm we have 1 bucket 69 | assert counter.buckets.size == 1 70 | 71 | # Wait past 3 bucket time periods (the 1st bucket then 2 empty ones) 72 | _time.increment(counter.buckets_size_in_milliseconds() * 3) 73 | 74 | # Add another 75 | counter.increment(RollingNumberEvent.SUCCESS) 76 | 77 | # We should have 4 (1 + 2 empty + 1 new one) buckets 78 | assert counter.buckets.size == 4 79 | 80 | 81 | def test_increment_in_single_bucket(): 82 | _time = MockedTime() 83 | counter = RollingNumber(200, 10, _time=_time) 84 | 85 | # We start out with 0 buckets in the queue 86 | assert counter.buckets.size == 0 87 | 88 | # Increment 89 | counter.increment(RollingNumberEvent.SUCCESS) 90 | counter.increment(RollingNumberEvent.SUCCESS) 91 | counter.increment(RollingNumberEvent.SUCCESS) 92 | counter.increment(RollingNumberEvent.SUCCESS) 93 | counter.increment(RollingNumberEvent.FAILURE) 94 | counter.increment(RollingNumberEvent.FAILURE) 95 | counter.increment(RollingNumberEvent.TIMEOUT) 96 | 97 | # Confirm we have 1 bucket 98 | assert counter.buckets.size == 1 99 | 100 | # The count should match 101 | assert counter.buckets.last().adder(RollingNumberEvent.SUCCESS).sum() == 4 102 | assert counter.buckets.last().adder(RollingNumberEvent.FAILURE).sum() == 2 103 | assert counter.rolling_sum(RollingNumberEvent.FAILURE) == 2 104 | assert counter.buckets.last().adder(RollingNumberEvent.TIMEOUT).sum() == 1 105 | 106 | 107 | def test_increment_in_multiple_buckets(): 108 | _time = MockedTime() 109 | counter = RollingNumber(200, 10, _time=_time) 110 | 111 | # We start out with 0 buckets in the queue 112 | assert counter.buckets.size == 0 113 | 114 | # Increment 115 | counter.increment(RollingNumberEvent.SUCCESS) 116 | counter.increment(RollingNumberEvent.SUCCESS) 117 | counter.increment(RollingNumberEvent.SUCCESS) 118 | counter.increment(RollingNumberEvent.SUCCESS) 119 | counter.increment(RollingNumberEvent.FAILURE) 120 | counter.increment(RollingNumberEvent.FAILURE) 121 | counter.increment(RollingNumberEvent.TIMEOUT) 122 | 
counter.increment(RollingNumberEvent.TIMEOUT) 123 | counter.increment(RollingNumberEvent.SHORT_CIRCUITED) 124 | 125 | # Sleep to get to a new bucket 126 | _time.increment(counter.buckets_size_in_milliseconds() * 3) 127 | 128 | # Increment 129 | counter.increment(RollingNumberEvent.SUCCESS) 130 | counter.increment(RollingNumberEvent.SUCCESS) 131 | counter.increment(RollingNumberEvent.FAILURE) 132 | counter.increment(RollingNumberEvent.FAILURE) 133 | counter.increment(RollingNumberEvent.FAILURE) 134 | counter.increment(RollingNumberEvent.TIMEOUT) 135 | counter.increment(RollingNumberEvent.SHORT_CIRCUITED) 136 | 137 | # Confirm we have 4 bucket 138 | assert counter.buckets.size == 4 139 | 140 | # The count of the last buckets 141 | assert counter.buckets.last().adder(RollingNumberEvent.SUCCESS).sum() == 2 142 | assert counter.buckets.last().adder(RollingNumberEvent.FAILURE).sum() == 3 143 | assert counter.buckets.last().adder(RollingNumberEvent.TIMEOUT).sum() == 1 144 | assert counter.buckets.last().adder(RollingNumberEvent.SHORT_CIRCUITED).sum() == 1 145 | 146 | # The total count 147 | assert counter.rolling_sum(RollingNumberEvent.SUCCESS) == 6 148 | assert counter.rolling_sum(RollingNumberEvent.FAILURE) == 5 149 | assert counter.rolling_sum(RollingNumberEvent.TIMEOUT) == 3 150 | assert counter.rolling_sum(RollingNumberEvent.SHORT_CIRCUITED) == 2 151 | 152 | # Wait until window passes 153 | _time.increment(counter.milliseconds) 154 | 155 | # Increment 156 | counter.increment(RollingNumberEvent.SUCCESS) 157 | 158 | # The total count should now include only the last bucket after a reset 159 | # since the window passed 160 | assert counter.rolling_sum(RollingNumberEvent.SUCCESS) == 1 161 | assert counter.rolling_sum(RollingNumberEvent.FAILURE) == 0 162 | assert counter.rolling_sum(RollingNumberEvent.TIMEOUT) == 0 163 | assert counter.rolling_sum(RollingNumberEvent.SHORT_CIRCUITED) == 0 164 | 165 | 166 | def test_success(): 167 | counter_event(RollingNumberEvent.SUCCESS) 168 | 169 | 170 | def test_failure(): 171 | counter_event(RollingNumberEvent.FAILURE) 172 | 173 | 174 | def test_timeout(): 175 | counter_event(RollingNumberEvent.TIMEOUT) 176 | 177 | 178 | def test_short_circuited(): 179 | counter_event(RollingNumberEvent.SHORT_CIRCUITED) 180 | 181 | 182 | def test_thread_pool_rejected(): 183 | counter_event(RollingNumberEvent.THREAD_POOL_REJECTED) 184 | 185 | 186 | def test_fallback_success(): 187 | counter_event(RollingNumberEvent.FALLBACK_SUCCESS) 188 | 189 | 190 | def test_fallback_failure(): 191 | counter_event(RollingNumberEvent.FALLBACK_FAILURE) 192 | 193 | 194 | def test_fallback_regection(): 195 | counter_event(RollingNumberEvent.FALLBACK_REJECTION) 196 | 197 | 198 | def test_exception_throw(): 199 | counter_event(RollingNumberEvent.EXCEPTION_THROWN) 200 | 201 | 202 | def test_thread_execution(): 203 | counter_event(RollingNumberEvent.THREAD_EXECUTION) 204 | 205 | 206 | def test_collapsed(): 207 | counter_event(RollingNumberEvent.COLLAPSED) 208 | 209 | 210 | def test_response_from_cache(): 211 | counter_event(RollingNumberEvent.RESPONSE_FROM_CACHE) 212 | 213 | 214 | def test_counter_retrieval_refreshes_buckets(): 215 | _time = MockedTime() 216 | counter = RollingNumber(200, 10, _time=_time) 217 | 218 | # We start out with 0 buckets in the queue 219 | assert counter.buckets.size == 0 220 | 221 | # Increment 222 | counter.increment(RollingNumberEvent.SUCCESS) 223 | counter.increment(RollingNumberEvent.SUCCESS) 224 | counter.increment(RollingNumberEvent.SUCCESS) 225 | 
counter.increment(RollingNumberEvent.SUCCESS) 226 | counter.increment(RollingNumberEvent.FAILURE) 227 | counter.increment(RollingNumberEvent.FAILURE) 228 | 229 | # Sleep to get to a new bucketV 230 | _time.increment(counter.buckets_size_in_milliseconds() * 3) 231 | 232 | # We should have 1 bucket since nothing has triggered the update of 233 | # buckets in the elapsed time 234 | assert counter.buckets.size == 1 235 | 236 | # The total counts 237 | assert counter.rolling_sum(RollingNumberEvent.SUCCESS) == 4 238 | assert counter.rolling_sum(RollingNumberEvent.FAILURE) == 2 239 | 240 | # We should have 4 buckets as the counter should have triggered 241 | # the buckets being created to fill in time 242 | assert counter.buckets.size == 4 243 | 244 | # Wait until window passes 245 | _time.increment(counter.milliseconds) 246 | 247 | # The total counts should all be 0 (and the buckets cleared by the get, 248 | #not only increment) 249 | assert counter.rolling_sum(RollingNumberEvent.SUCCESS) == 0 250 | assert counter.rolling_sum(RollingNumberEvent.FAILURE) == 0 251 | 252 | # Increment 253 | counter.increment(RollingNumberEvent.SUCCESS) 254 | 255 | # The total count should now include only the last bucket after a reset 256 | # since the window passed 257 | assert counter.rolling_sum(RollingNumberEvent.SUCCESS) == 1 258 | assert counter.rolling_sum(RollingNumberEvent.FAILURE) == 0 259 | 260 | 261 | def test_update_max_1(): 262 | _time = MockedTime() 263 | counter = RollingNumber(200, 10, _time=_time) 264 | 265 | # We start out with 0 buckets in the queue 266 | assert counter.buckets.size == 0 267 | 268 | # Increment 269 | counter.update_rolling_max(RollingNumberEvent.THREAD_MAX_ACTIVE, 10) 270 | 271 | # We should have 1 272 | assert counter.buckets.size == 1 273 | 274 | # The count should be 10 275 | assert counter.buckets.last().max_updater(RollingNumberEvent.THREAD_MAX_ACTIVE).max() == 10 276 | assert counter.rolling_max(RollingNumberEvent.THREAD_MAX_ACTIVE) == 10 277 | 278 | # Sleep to get to a new bucket 279 | _time.increment(counter.buckets_size_in_milliseconds() * 3) 280 | 281 | # Increment again is latest bucket 282 | counter.update_rolling_max(RollingNumberEvent.THREAD_MAX_ACTIVE, 20) 283 | 284 | # We should have 4 285 | assert counter.buckets.size == 4 286 | 287 | # The max 288 | assert counter.buckets.last().max_updater(RollingNumberEvent.THREAD_MAX_ACTIVE).max() == 20 289 | 290 | # Count per buckets 291 | values = counter.values(RollingNumberEvent.THREAD_MAX_ACTIVE) 292 | assert values[0] == 20 # Latest bucket 293 | assert values[1] == 0 294 | assert values[2] == 0 295 | assert values[3] == 10 # Oldest bucket 296 | 297 | 298 | def test_update_max_2(): 299 | _time = MockedTime() 300 | counter = RollingNumber(200, 10, _time=_time) 301 | 302 | # We start out with 0 buckets in the queue 303 | assert counter.buckets.size == 0 304 | 305 | # Increment 306 | counter.update_rolling_max(RollingNumberEvent.THREAD_MAX_ACTIVE, 10) 307 | counter.update_rolling_max(RollingNumberEvent.THREAD_MAX_ACTIVE, 30) 308 | counter.update_rolling_max(RollingNumberEvent.THREAD_MAX_ACTIVE, 20) 309 | 310 | # We should have 1 311 | assert counter.buckets.size == 1 312 | 313 | # The count should be 30 314 | assert counter.buckets.last().max_updater(RollingNumberEvent.THREAD_MAX_ACTIVE).max() == 30 315 | assert counter.rolling_max(RollingNumberEvent.THREAD_MAX_ACTIVE) == 30 316 | 317 | # Sleep to get to a new bucket 318 | _time.increment(counter.buckets_size_in_milliseconds() * 3) 319 | 320 | # Increment again is latest 
bucket 321 | counter.update_rolling_max(RollingNumberEvent.THREAD_MAX_ACTIVE, 30) 322 | counter.update_rolling_max(RollingNumberEvent.THREAD_MAX_ACTIVE, 30) 323 | counter.update_rolling_max(RollingNumberEvent.THREAD_MAX_ACTIVE, 50) 324 | 325 | # We should have 4 326 | assert counter.buckets.size == 4 327 | 328 | # The count 329 | assert counter.buckets.last().max_updater(RollingNumberEvent.THREAD_MAX_ACTIVE).max() == 50 330 | assert counter.value_of_latest_bucket(RollingNumberEvent.THREAD_MAX_ACTIVE) == 50 331 | 332 | # Values per buckets 333 | values = counter.values(RollingNumberEvent.THREAD_MAX_ACTIVE) 334 | assert values[0] == 50 # Latest bucket 335 | assert values[1] == 0 336 | assert values[2] == 0 337 | assert values[3] == 30 # Oldest bucket 338 | 339 | 340 | def test_max_value(): 341 | _time = MockedTime() 342 | counter = RollingNumber(200, 10, _time=_time) 343 | # TODO: Change tests to use this aproache for events 344 | event = RollingNumberEvent.THREAD_MAX_ACTIVE 345 | 346 | # We start out with 0 buckets in the queue 347 | assert counter.buckets.size == 0 348 | 349 | # Increment 350 | counter.update_rolling_max(event, 10) 351 | 352 | # Sleep to get to a new bucket 353 | _time.increment(counter.buckets_size_in_milliseconds()) 354 | 355 | # Increment 356 | counter.update_rolling_max(event, 30) 357 | 358 | # Sleep to get to a new bucket 359 | _time.increment(counter.buckets_size_in_milliseconds()) 360 | 361 | # Increment 362 | counter.update_rolling_max(event, 40) 363 | 364 | # Sleep to get to a new bucket 365 | _time.increment(counter.buckets_size_in_milliseconds()) 366 | 367 | # Try Decrement 368 | counter.update_rolling_max(event, 15) 369 | 370 | # The count should be max 371 | counter.update_rolling_max(event, 40) 372 | 373 | 374 | def test_empty_sum(): 375 | _time = MockedTime() 376 | counter = RollingNumber(200, 10, _time=_time) 377 | event = RollingNumberEvent.COLLAPSED 378 | assert counter.rolling_sum(event) == 0 379 | 380 | 381 | def test_empty_max(): 382 | _time = MockedTime() 383 | counter = RollingNumber(200, 10, _time=_time) 384 | event = RollingNumberEvent.THREAD_MAX_ACTIVE 385 | assert counter.rolling_max(event) == 0 386 | 387 | 388 | def test_empty_latest_value(): 389 | _time = MockedTime() 390 | counter = RollingNumber(200, 10, _time=_time) 391 | event = RollingNumberEvent.THREAD_MAX_ACTIVE 392 | assert counter.value_of_latest_bucket(event) == 0 393 | 394 | 395 | def test_rolling(): 396 | _time = MockedTime() 397 | counter = RollingNumber(20, 2, _time=_time) 398 | event = RollingNumberEvent.THREAD_MAX_ACTIVE 399 | 400 | assert counter.cumulative_sum(event) == 0 401 | 402 | # Iterate over 20 buckets on a queue sized for 2 403 | for i in range(20): 404 | counter.current_bucket() 405 | _time.increment(counter.buckets_size_in_milliseconds()) 406 | 407 | assert len(counter.values(event)) == 2 408 | 409 | counter.value_of_latest_bucket(event) 410 | 411 | 412 | def test_cumulative_counter_after_rolling(): 413 | _time = MockedTime() 414 | counter = RollingNumber(20, 2, _time=_time) 415 | event = RollingNumberEvent.SUCCESS 416 | 417 | assert counter.cumulative_sum(event) == 0 418 | 419 | # Iterate over 20 buckets on a queue sized for 2 420 | for i in range(20): 421 | counter.increment(event) 422 | _time.increment(counter.buckets_size_in_milliseconds()) 423 | 424 | assert len(counter.values(event)) == 2 425 | 426 | counter.value_of_latest_bucket(event) 427 | 428 | # Cumulative count should be 20 (for the number of loops above) regardless 429 | # of buckets rolling 430 | 
assert counter.cumulative_sum(event) == 20 431 | 432 | 433 | def test_cumulative_counter_after_rolling_and_reset(): 434 | _time = MockedTime() 435 | counter = RollingNumber(20, 2, _time=_time) 436 | event = RollingNumberEvent.SUCCESS 437 | 438 | assert counter.cumulative_sum(event) == 0 439 | 440 | # Iterate over 20 buckets on a queue sized for 2 441 | for i in range(20): 442 | counter.increment(event) 443 | _time.increment(counter.buckets_size_in_milliseconds()) 444 | 445 | assert len(counter.values(event)) == 2 446 | 447 | counter.value_of_latest_bucket(event) 448 | 449 | # simulate a reset occurring every once in a while 450 | # so we ensure the absolute sum is handling it okay 451 | if i == 5 or i == 15: 452 | counter.reset() 453 | 454 | # Cumulative count should be 20 (for the number of loops above) regardless 455 | # of buckets rolling 456 | assert counter.cumulative_sum(event) == 20 457 | 458 | 459 | def test_cumulative_counter_after_rolling_and_reset2(): 460 | _time = MockedTime() 461 | counter = RollingNumber(20, 2, _time=_time) 462 | event = RollingNumberEvent.SUCCESS 463 | 464 | assert counter.cumulative_sum(event) == 0 465 | 466 | counter.increment(event) 467 | counter.increment(event) 468 | counter.increment(event) 469 | 470 | # Iterate over 20 buckets on a queue sized for 2 471 | for i in range(20): 472 | _time.increment(counter.buckets_size_in_milliseconds()) 473 | 474 | # simulate a reset occurring every once in a while 475 | # so we ensure the absolute sum is handling it okay 476 | if i == 5 or i == 15: 477 | counter.reset() 478 | 479 | # No increments during the loop, just some before and after 480 | counter.increment(event) 481 | counter.increment(event) 482 | 483 | # Cumulative count should be 5 regardless of buckets rolling 484 | assert counter.cumulative_sum(event) == 5 485 | 486 | 487 | def test_cumulative_counter_after_rolling_and_reset3(): 488 | _time = MockedTime() 489 | counter = RollingNumber(20, 2, _time=_time) 490 | event = RollingNumberEvent.SUCCESS 491 | 492 | assert counter.cumulative_sum(event) == 0 493 | 494 | counter.increment(event) 495 | counter.increment(event) 496 | counter.increment(event) 497 | 498 | # Iterate over 20 buckets on a queue sized for 2 499 | for i in range(20): 500 | _time.increment(counter.buckets_size_in_milliseconds()) 501 | 502 | # Since we are rolling over the buckets it should reset naturally 503 | 504 | # No increments during the loop, just some before and after 505 | counter.increment(event) 506 | counter.increment(event) 507 | 508 | # Cumulative count should be 5 regardless of buckets rolling 509 | assert counter.cumulative_sum(event) == 5 510 | 511 | 512 | def test_milliseconds_buckets_size_error(): 513 | _time = MockedTime() 514 | 515 | with pytest.raises(Exception): 516 | RollingNumber(100, 11, _time=_time) 517 | 518 | 519 | def test_rolling_number_event_is_counter(): 520 | event = RollingNumberEvent(RollingNumberEvent.SUCCESS) 521 | assert event.is_counter() is True 522 | 523 | 524 | def test_rolling_number_event_is_max_updater(): 525 | event = RollingNumberEvent(RollingNumberEvent.THREAD_MAX_ACTIVE) 526 | assert event.is_max_updater() is True 527 | 528 | 529 | def counter_event(event): 530 | _time = MockedTime() 531 | counter = RollingNumber(200, 10, _time=_time) 532 | 533 | # We start out with 0 buckets in the queue 534 | assert counter.buckets.size == 0 535 | 536 | # We start out with 0 sum 537 | assert counter.rolling_sum(event) == 0 538 | 539 | # Increment 540 | counter.increment(event) 541 | 542 | # We shoud have 1 
bucket 543 | assert counter.buckets.size == 1 544 | 545 | # The count should be 1 546 | assert counter.buckets.last().adder(event).sum() == 1 547 | assert counter.rolling_sum(event) == 1 548 | 549 | # Sleep to get to a new bucket 550 | _time.increment(counter.buckets_size_in_milliseconds() * 3) 551 | 552 | # Incremenet again in latest bucket 553 | counter.increment(event) 554 | 555 | # We should have 4 buckets 556 | assert counter.buckets.size == 4 557 | 558 | # The count of the last bucket 559 | assert counter.buckets.last().adder(event).sum() == 1 560 | 561 | # The total count 562 | assert counter.rolling_sum(event) == 2 563 | -------------------------------------------------------------------------------- /tests/test_rolling_percentile.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from .utils import MockedTime 4 | from .sample_data import sample_data_holder_1, sample_data_holder_2 5 | 6 | from hystrix.rolling_percentile import RollingPercentile, PercentileSnapshot 7 | 8 | 9 | def test_rolling(): 10 | time = MockedTime() 11 | percentile = RollingPercentile(time, 60000, 12, 1000, True) 12 | percentile.add_value(1000) 13 | percentile.add_value(1000) 14 | percentile.add_value(1000) 15 | percentile.add_value(2000) 16 | 17 | assert percentile.buckets.size == 1 18 | 19 | # No bucket turnover yet so percentile not yet generated 20 | assert percentile.percentile(50) == 0 21 | 22 | time.increment(6000) 23 | 24 | # Still only 1 bucket until we touch it again 25 | assert percentile.buckets.size == 1 26 | 27 | # A bucket has been created so we have a new percentile 28 | assert percentile.percentile(50) == 1000 29 | 30 | # Now 2 buckets since getting a percentile causes bucket retrieval 31 | assert percentile.buckets.size == 2 32 | 33 | percentile.add_value(1000) 34 | percentile.add_value(500) 35 | 36 | assert percentile.buckets.size == 2 37 | 38 | percentile.add_value(200) 39 | percentile.add_value(200) 40 | percentile.add_value(1600) 41 | percentile.add_value(200) 42 | percentile.add_value(1600) 43 | percentile.add_value(1600) 44 | 45 | # We haven't progressed to a new bucket so the percentile should be the 46 | # same and ignore the most recent bucket 47 | assert percentile.percentile(50) == 1000 48 | 49 | # Increment to another bucket so we include all of the above in the 50 | # PercentileSnapshot 51 | time.increment(6000) 52 | 53 | # The rolling version should have the same data as creating a snapshot 54 | # like this 55 | snapshot = PercentileSnapshot(1000, 1000, 1000, 2000, 1000, 500, 56 | 200, 200, 1600, 200, 1600, 1600) 57 | 58 | assert snapshot.percentile(0.15) == percentile.percentile(0.15) 59 | assert snapshot.percentile(0.50) == percentile.percentile(0.50) 60 | assert snapshot.percentile(0.90) == percentile.percentile(0.90) 61 | assert snapshot.percentile(0.995) == percentile.percentile(0.995) 62 | 63 | # mean = 1000+1000+1000+2000+1000+500+200+200+1600+200+1600+1600/12 64 | assert snapshot.mean() == 991 65 | 66 | 67 | def test_value_is_zero_after_rolling_window_passes_and_no_traffic(): 68 | time = MockedTime() 69 | percentile = RollingPercentile(time, 60000, 12, 1000, True) 70 | percentile.add_value(1000) 71 | percentile.add_value(1000) 72 | percentile.add_value(1000) 73 | percentile.add_value(2000) 74 | percentile.add_value(4000) 75 | 76 | assert percentile.buckets.size == 1 77 | 78 | # No bucket turnover yet so percentile not yet generated 79 | assert percentile.percentile(50) == 0 80 | 81 | time.increment(6000) 82 | 83 
| # Still only 1 bucket until we touch it again 84 | assert percentile.buckets.size == 1 85 | 86 | # A bucket has been created so we have a new percentile 87 | assert percentile.percentile(50) == 1500 88 | 89 | # Let 1 minute pass 90 | time.increment(60000) 91 | 92 | # No data in a minute should mean all buckets are empty (or reset) so we 93 | # should not have any percentiles 94 | assert percentile.percentile(50) == 0 95 | 96 | 97 | def test_sample_data_over_time_1(): 98 | time = MockedTime() 99 | percentile = RollingPercentile(time, 60000, 12, 1000, True) 100 | previous_time = 0 101 | for time_millis, latency in sample_data_holder_1: 102 | time.increment(time_millis - previous_time) 103 | previous_time = time_millis 104 | percentile.add_value(latency) 105 | 106 | print('0.01', percentile.percentile(0.01)) 107 | print('Median', percentile.percentile(50)) 108 | print('90th', percentile.percentile(90)) 109 | print('99th', percentile.percentile(99)) 110 | print('99.5th', percentile.percentile(99.5)) 111 | print('99.99', percentile.percentile(99.99)) 112 | 113 | print('Median', percentile.percentile(50)) 114 | print('Median', percentile.percentile(50)) 115 | print('Median', percentile.percentile(50)) 116 | 117 | # In a loop as a use case was found where very different values were 118 | # calculated in subsequent requests. 119 | for _ in range(10): 120 | percentile50 = percentile.percentile(50) 121 | if percentile50 > 5: 122 | pytest.fail('We expect around 2 but got: {}'.format(percentile50)) 123 | 124 | percentile995 = percentile.percentile(99.5) 125 | if percentile995 < 20: 126 | msg = 'We expect to see some high values over 20 but got: {}' 127 | pytest.fail(msg.format(percentile995)) 128 | 129 | 130 | def test_sample_data_over_time_2(): 131 | time = MockedTime() 132 | percentile = RollingPercentile(time, 60000, 12, 1000, True) 133 | previous_time = 0 134 | for time_millis, latency in sample_data_holder_2: 135 | time.increment(time_millis - previous_time) 136 | previous_time = time_millis 137 | percentile.add_value(latency) 138 | 139 | print('0.01', percentile.percentile(0.01)) 140 | print('Median', percentile.percentile(50)) 141 | print('90th', percentile.percentile(90)) 142 | print('99th', percentile.percentile(99)) 143 | print('99.5th', percentile.percentile(99.5)) 144 | print('99.99', percentile.percentile(99.99)) 145 | 146 | percentile50 = percentile.percentile(50) 147 | if percentile50 > 90 or percentile50 < 50: 148 | pytest.fail('We expect around 60-70 but got: {}'.format(percentile50)) 149 | 150 | percentile99 = percentile.percentile(99) 151 | if percentile99 < 400: 152 | msg = 'We expect to see some high values over 400 but got: {}' 153 | pytest.fail(msg.format(percentile99)) 154 | 155 | 156 | def test_percentile_algorithm_media1(): 157 | snapshot = PercentileSnapshot(100, 100, 100, 100, 200, 200, 158 | 200, 300, 300, 300, 300) 159 | assert snapshot.percentile(50) == 200 160 | 161 | 162 | def test_percentile_algorithm_media2(): 163 | snapshot = PercentileSnapshot(100, 100, 100, 100, 100, 100, 164 | 100, 100, 100, 100, 500) 165 | assert snapshot.percentile(50) == 100 166 | 167 | 168 | def test_percentile_algorithm_media3(): 169 | snapshot = PercentileSnapshot(50, 75, 100, 125, 160, 170, 170 | 180, 200, 210, 300, 500) 171 | assert snapshot.percentile(50) == 175 172 | 173 | 174 | def test_percentile_algorithm_media4(): 175 | ''' Unsorted so it is expected to sort it for us. 
174 | def test_percentile_algorithm_median4():
175 |     ''' Unsorted so it is expected to sort it for us. '''
176 |     snapshot = PercentileSnapshot(300, 75, 125, 500, 100, 160,
177 |                                   180, 200, 210, 50, 170)
178 |     assert snapshot.percentile(50) == 175
179 |
180 |
181 | def test_percentile_algorithm_extremes():
182 |     ''' Unsorted so it is expected to sort it for us. '''
183 |     snapshot = PercentileSnapshot(2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
184 |                                   2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
185 |                                   2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
186 |                                   800, 768, 657, 700, 867)
187 |
188 |     print('0.01', snapshot.percentile(0.01))
189 |     print('10th', snapshot.percentile(10))
190 |     print('Median', snapshot.percentile(50))
191 |     print('75th', snapshot.percentile(75))
192 |     print('90th', snapshot.percentile(90))
193 |     print('99th', snapshot.percentile(99))
194 |     print('99.5th', snapshot.percentile(99.5))
195 |     print('99.99', snapshot.percentile(99.99))
196 |
197 |     assert snapshot.percentile(50) == 2
198 |     assert snapshot.percentile(10) == 2
199 |     assert snapshot.percentile(75) == 2
200 |
201 |     if snapshot.percentile(95) < 600:
202 |         msg = 'We expect 95th to be over 600 to show the extremes but got: {}'
203 |         pytest.fail(msg.format(snapshot.percentile(95)))
204 |
205 |     if snapshot.percentile(99) < 600:
206 |         msg = 'We expect 99th to be over 600 to show the extremes but got: {}'
207 |         pytest.fail(msg.format(snapshot.percentile(99)))
208 |
209 |
210 | def percentile_for_values(*values):
211 |     return PercentileSnapshot(*values)
212 |
213 |
214 | def test_percentile_algorithm_high_percentile():
215 |     snapshot = percentile_for_values(1, 2, 3)
216 |     assert snapshot.percentile(50) == 2
217 |     assert snapshot.percentile(75) == 3
218 |
219 |
220 | def test_percentile_algorithm_low_percentile():
221 |     snapshot = percentile_for_values(1, 2)
222 |     assert snapshot.percentile(25) == 1
223 |     assert snapshot.percentile(75) == 2
224 |
225 |
226 | def test_percentile_algorithm_percentiles():
227 |     snapshot = percentile_for_values(10, 30, 20, 40)
228 |
229 |     assert snapshot.percentile(30) == 22
230 |     assert snapshot.percentile(25) == 20
231 |     assert snapshot.percentile(75) == 40
232 |     assert snapshot.percentile(50) == 30
233 |
234 |     assert snapshot.percentile(-1) == 10
235 |     assert snapshot.percentile(101) == 40
236 |
237 |
238 | def test_percentile_algorithm_NIST_example():
239 |     snapshot = percentile_for_values(951772, 951567, 951937, 951959, 951442,
240 |                                      950610, 951591, 951195, 951772, 950925,
241 |                                      951990, 951682)
242 |     assert snapshot.percentile(90) == 951983
243 |     assert snapshot.percentile(100) == 951990
244 |
245 |
246 | def test_does_nothing_when_disabled():
247 |     time = MockedTime()
248 |     percentile = RollingPercentile(time, 60000, 12, 1000, False)
249 |     previous_time = 0
250 |     for time_millis, latency in sample_data_holder_2:
251 |         time.increment(time_millis - previous_time)
252 |         previous_time = time_millis
253 |         percentile.add_value(latency)
254 |
255 |     assert percentile.percentile(50) == -1
256 |     assert percentile.percentile(75) == -1
257 |     assert percentile.mean() == -1
258 |
--------------------------------------------------------------------------------
/tests/utils.py:
--------------------------------------------------------------------------------
1 | from atomos.multiprocessing.atomic import AtomicFloat
2 |
3 |
4 | class MockedTime():
5 |
6 |     def __init__(self):
7 |         self._time = AtomicFloat(value=0)
8 |
9 |     def current_time_in_millis(self):
10 |         return self._time.get()
11 |
12 |     def increment(self, millis):
13 |         return self._time.add_and_get(millis)
14 |
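# MockedTime mirrors the small clock interface the hystrix classes read from
# (current_time_in_millis()); backing it with an atomos AtomicFloat presumably
# keeps the value safe to share across processes. The tests above inject it in
# place of a real clock and advance it by hand, e.g.:
#
#     time = MockedTime()
#     percentile = RollingPercentile(time, 60000, 12, 1000, True)
#     time.increment(6000)       # deterministically "sleep" for 6 seconds
#     percentile.percentile(50)  # next read rolls over to a new bucket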
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | envlist = py33,py34,py35,py36
3 |
4 | [testenv]
5 | commands = python setup.py test
6 |
7 | [testenv:py33]
8 | basepython = python3.3
9 | deps =
10 |     pytest
11 |     six
12 |     pytest-cov
13 |     pytest-cache
14 |     pytest-timeout
15 | commands = py.test --strict --verbose --tb=long --cov hystrix --cov-report term-missing tests
16 |
17 | [testenv:py34]
18 | basepython = python3.4
19 |
20 | [testenv:py35]
21 | basepython = python3.5
22 |
23 | [testenv:py36]
24 | basepython = python3.6
25 |
26 | [testenv:docs]
27 | changedir = docs
28 | deps =
29 |     sphinx
30 |     sphinx_rtd_theme
31 |     sphinxcontrib-napoleon
32 | commands =
33 |     /usr/bin/make clean html
34 |
--------------------------------------------------------------------------------
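With this configuration, running tox locally runs python setup.py test in the
py34, py35, and py36 environments (inherited from the base [testenv] section)
and py.test with coverage reporting for the hystrix package in the py33
environment; tox -e docs builds the Sphinx documentation via make clean html,
using the hard-coded /usr/bin/make path shown above.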