├── README
├── tests
│   ├── __init__.py
│   ├── test_misc.py
│   ├── test_mysql.py
│   ├── test_spool.py
│   ├── test_cache.py
│   ├── test_callables.py
│   ├── test_integration.py
│   ├── test_application.py
│   └── test_checker.py
├── requirements-py2.txt
├── MANIFEST.in
├── setup.cfg
├── TODO.md
├── requirements.txt
├── hacheck
│   ├── __init__.py
│   ├── config.py
│   ├── compat.py
│   ├── spool.py
│   ├── cache.py
│   ├── handlers.py
│   ├── haupdown.py
│   ├── main.py
│   ├── mysql.py
│   └── checker.py
├── .coveragerc
├── requirements-tests.txt
├── .gitignore
├── .travis.yml
├── LICENSE.txt
├── setup.py
└── README.md

/README:
--------------------------------------------------------------------------------
README.md
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/requirements-py2.txt:
--------------------------------------------------------------------------------
unittest2
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
include *.txt
include *.md
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
[flake8]
max-line-length=120
--------------------------------------------------------------------------------
/TODO.md:
--------------------------------------------------------------------------------
* postgresql support
* better logging
* statsd integration?
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
tornado>=3.0.1,<4.2
futures
PyYAML>=3.0
six>=1.4.0
--------------------------------------------------------------------------------
/hacheck/__init__.py:
--------------------------------------------------------------------------------
version_info = (0, 9, 0)
__version__ = ".".join(map(str, version_info))
--------------------------------------------------------------------------------
/.coveragerc:
--------------------------------------------------------------------------------
[report]

exclude_lines =
    pragma: no cover
    if __name__ == .__main__.:
--------------------------------------------------------------------------------
/requirements-tests.txt:
--------------------------------------------------------------------------------
# Mocking stuff
mock==1.0.1
# Running tests
nose==1.3.0
nose-cov==1.6
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
*.pyc
build/
dist/
hacheck.egg-info/
.coverage
spool/
env/
env26/
env27/
env32/
env33/
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
sudo: false
language: python
python:
  - "2.6"
  - "2.7"
  - "3.2"
  - "3.3"
  - "3.4"
env:
  - TORNADO_VERSION=3.0.1
  - TORNADO_VERSION=3.2.2
  - TORNADO_VERSION=4.0.2
  - TORNADO_VERSION=4.1.0
install:
  - "pip install -r requirements.txt -r requirements-tests.txt --use-mirrors"
  - "if [[ ${TRAVIS_PYTHON_VERSION::1} != '3' ]]; then pip install -r requirements-py2.txt --use-mirrors; fi"
  - "pip install tornado==$TORNADO_VERSION"
# only build master and PRs, not every branch
branches:
  only:
    - master
script: "nosetests"
matrix:
  fast_finish: true
--------------------------------------------------------------------------------
/tests/test_misc.py:
--------------------------------------------------------------------------------
import resource
import mock

from unittest import TestCase

from hacheck import main


@mock.patch('resource.getrlimit', return_value=(10, 20))
@mock.patch('resource.setrlimit')
class SetRLimitNOFILETestCase(TestCase):
    def test_max(self, mock_setrlimit, mock_getrlimit):
        main.setrlimit_nofile('max')
        mock_setrlimit.assert_called_once_with(resource.RLIMIT_NOFILE, (20, 20))

    def test_specific(self, mock_setrlimit, mock_getrlimit):
        main.setrlimit_nofile(12)
        mock_setrlimit.assert_called_once_with(resource.RLIMIT_NOFILE, (12, 20))

    def test_illegal(self, mock_setrlimit, mock_getrlimit):
        self.assertRaises(ValueError, main.setrlimit_nofile, 25)
--------------------------------------------------------------------------------
/hacheck/config.py:
--------------------------------------------------------------------------------
import yaml


def max_or_int(some_str_value):
    if some_str_value == 'max':
        return 'max'
    else:
        return int(some_str_value)


DEFAULTS = {
    'cache_time': (float, 10.0),
    'service_name_header': (str, None),
    'log_path': (str, 'stderr'),
    'mysql_username': (str, None),
    'mysql_password': (str, None),
    'rlimit_nofile': (max_or_int, None)
}


config = {}
for key, (_, default) in DEFAULTS.items():
    config[key] = default


def load_from(path):
    with open(path, 'r') as f:
        c = yaml.safe_load(f)
    for key, value in c.items():
        if key in DEFAULTS:
            constructor, default = DEFAULTS[key]
            config[key] = constructor(value)
    return config
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
hacheck
Copyright (c) 2013 Uber Technologies, Inc.
The MIT License

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
--------------------------------------------------------------------------------
/tests/test_mysql.py:
--------------------------------------------------------------------------------
try:
    from unittest2 import TestCase
except ImportError:
    from unittest import TestCase

#import tornado.testing

from hacheck import mysql


class TestMySQLHelpers(TestCase):
    def test_sxor(self):
        self.assertEqual(b'\0\0', mysql._sxor(b'00', b'00'))
        self.assertEqual(b'\f\f', mysql._sxor(b'\f\f', b'\0\0'))
        self.assertEqual(b'\f\f', mysql._sxor(b'\0\0', b'\f\f'))
        self.assertEqual(b'\0\0', mysql._sxor(b'\f\f', b'\f\f'))
        self.assertEqual(b'\x1f\x0a\x1e\x00\x0b', mysql._sxor(b'hello', b'world'))

    def test_lenc(self):
        self.assertEqual((1, 1), mysql._read_lenc(b'\x01'))
        self.assertEqual((255, 3), mysql._read_lenc(b'\xfc\xff\x00'))
        self.assertEqual((16777215, 4), mysql._read_lenc(b'\xfd\xff\xff\xff'))
        self.assertEqual((4294967295, 9), mysql._read_lenc(b'\xfe\xff\xff\xff\xff\x00\x00\x00\x00'))

    def test_password_hash(self):
        self.assertEqual(
            b'\x19W\xdc\xe2rB\x82\xe0\x18\xf4\r\x90X$\xcbca\xf8\x8dA',
            mysql._stupid_hash_password('12345678901234567890', 'password')
        )


# TODO: Write unit tests of the actual protocol
--------------------------------------------------------------------------------
/hacheck/compat.py:
--------------------------------------------------------------------------------
"""compatibility classes for py2.6, py3, or anything else strange"""

import contextlib
import collections
import sys


def Counter(*args):
    c = collections.defaultdict(lambda: 0)
    if args:
        c.update(args[0])
    return c


@contextlib.contextmanager
def nested3(*managers):
    """Combine multiple context managers into a single nested context manager.

    This function has been deprecated in favour of the multiple manager form
    of the with statement.

    The one advantage of this function over the multiple manager form of the
    with statement is that argument unpacking allows it to be
    used with a variable number of context managers as follows:

        with nested(*managers):
            do_something()

    """
    exits = []
    vars = []
    exc = (None, None, None)
    try:
        for mgr in managers:
            exit = mgr.__exit__
            enter = mgr.__enter__
            vars.append(enter())
            exits.append(exit)
        yield vars
    except:
        exc = sys.exc_info()
    finally:
        while exits:
            exit = exits.pop()
            try:
                if exit(*exc):
                    exc = (None, None, None)
            except:
                exc = sys.exc_info()
        if exc != (None, None, None):
            # Don't rely on sys.exc_info() still containing
            # the right information. Another exception may
            # have been raised and caught by an exit method
            raise exc[1]


def bchr3(c):
    return bytes((c,))


def bchr2(c):
    return chr(c)


if sys.version_info < (3, 0):
    nested = contextlib.nested
    bchr = bchr2
else:
    nested = nested3
    bchr = bchr3
--------------------------------------------------------------------------------
/hacheck/spool.py:
--------------------------------------------------------------------------------
import os

config = {
    'spool_root': None,
}


def configure(spool_root, needs_write=False):
    access_required = os.W_OK | os.R_OK if needs_write else os.R_OK
    if os.path.exists(spool_root):
        if not os.access(spool_root, access_required):
            raise ValueError("Insufficient access to %s" % spool_root)
    else:
        os.mkdir(spool_root, 0o750)
    config['spool_root'] = spool_root


def is_up(service_name):
    """Check whether a service is asserted to be up or down. Includes the logic
    for checking the system-wide 'all' state.

    :returns: (bool of service status, dict of extra information)
    """
    all_up, all_info = status("all")
    if all_up:
        return status(service_name)
    else:
        return all_up, all_info


def status(service_name):
    """Check whether a service is asserted to be up or down, without checking
    the system-wide 'all' state.

    :returns: (bool of service status, dict of extra information)
    """
    try:
        with open(os.path.join(config['spool_root'], service_name), 'r') as f:
            reason = f.read()
        return False, {'service': service_name, 'reason': reason}
    except IOError:
        return True, {'service': service_name, 'reason': ''}


def status_all_down():
    """List all down services

    :returns: Iterable of pairs of (service name, dict of extra information)
    """
    for service_name in os.listdir(config['spool_root']):
        up, info = status(service_name)
        if not up:
            yield service_name, info


def up(service_name):
    try:
        os.unlink(os.path.join(config['spool_root'], service_name))
    except OSError:
        pass


def down(service_name, reason=""):
    with open(os.path.join(config['spool_root'], service_name), 'w') as f:
        f.write(reason)
--------------------------------------------------------------------------------
/tests/test_spool.py:
--------------------------------------------------------------------------------
import os.path
import mock
import shutil
import tempfile
from unittest import TestCase

from hacheck import spool

se = mock.sentinel


class TestSpool(TestCase):
    def setUp(self):
        self.root = tempfile.mkdtemp()
        spool.configure(self.root)

    def tearDown(self):
        shutil.rmtree(self.root)

    def test_configure_creates_root(self):
        spool.configure(os.path.join(self.root, 'spool'))
        assert os.path.exists(os.path.join(self.root, 'spool'))

    def test_configure_no_write(self):
        new_root = os.path.join(self.root, 'non_writable')
        os.mkdir(new_root)
        os.chmod(new_root, 0o555)
        self.assertRaises(ValueError, spool.configure, new_root, needs_write=True)

    def test_configure_no_write_no_needs_write(self):
        new_root = os.path.join(self.root, 'non_writable')
        os.mkdir(new_root)
        os.chmod(new_root, 0o555)
        spool.configure(new_root, needs_write=False)

    def test_basic(self):
        svcname = 'test_basic'
        self.assertEqual(True, spool.status(svcname)[0])
        spool.down(svcname)
        self.assertEqual(False, spool.status(svcname)[0])
        self.assertEqual(False, spool.is_up(svcname)[0])
        spool.up(svcname)
        self.assertEqual(True, spool.status(svcname)[0])

    def test_all(self):
        svcname = 'test_all'
        self.assertEqual(True, spool.status(svcname)[0])
        spool.down('all')
        self.assertEqual(True, spool.status(svcname)[0])
        self.assertEqual(False, spool.is_up(svcname)[0])

    def test_status_all_down(self):
        self.assertEqual(len(list(spool.status_all_down())), 0)
        spool.down('foo')
        self.assertEqual(list(spool.status_all_down()), [('foo', {'service': 'foo', 'reason': ''})])

    def test_repeated_ups_works(self):
        spool.up('all')
        spool.up('all')
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python
import collections

from setuptools import setup, find_packages
from pip.req import parse_requirements


def get_install_requirements():
    ReqOpts = collections.namedtuple('ReqOpts', ['skip_requirements_regex', 'default_vcs'])

    opts = ReqOpts(None, 'git')

    requires = []
    dependency_links = []

    for ir in parse_requirements('requirements.txt', options=opts):
        if ir is not None:
            if ir.url is not None:
                dependency_links.append(str(ir.url))
            if ir.req is not None:
                requires.append(str(ir.req))
    return requires, dependency_links


install_requires, dependency_links = get_install_requirements()

setup(
    name="hacheck",
    version="0.12.0",
    author="James Brown",
    author_email="jbrown@uber.com",
    url="https://github.com/uber/hacheck",
    license="MIT",
    packages=find_packages(exclude=['tests']),
    keywords=["monitoring", "load-balancing", "networking"],
    description="HAProxy health-check proxying service",
    install_requires=install_requires,
    dependency_links=dependency_links,
    test_suite="nose.collector",
    entry_points={
        'console_scripts': [
            'haup = hacheck.haupdown:up',
            'hadown = hacheck.haupdown:down',
            'hashowdowned = hacheck.haupdown:status_downed',
            'hastatus = hacheck.haupdown:status',
            'halist = hacheck.haupdown:halist',
            'hacheck = hacheck.main:main',
        ]
    },
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Web Environment",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "License :: OSI Approved :: MIT License",
        "Topic :: System :: Monitoring",
    ]
)
--------------------------------------------------------------------------------
/hacheck/cache.py:
--------------------------------------------------------------------------------
import contextlib
import copy
import functools
import time
try:
    from collections import Counter
except ImportError:
    from .compat import Counter
from collections import namedtuple

_cache = {}

config = {
    'cache_time': 10,
    'ignore_cache': False,
}

default_stats = Counter({
    'expirations': 0,
    'sets': 0,
    'gets': 0,
    'hits': 0,
    'misses': 0
})

stats = Counter()

Key = namedtuple('Key', ['original_key'])
Record = namedtuple('Record', ['expiry', 'value'])


def configure(cache_time=config['cache_time']):
    """Configure the cache and reset its values"""
    config['cache_time'] = cache_time
    stats.clear()
    stats.update(default_stats)
    _cache.clear()


def has_expired(record, now):
    if record.expiry < now:
        return True
    else:
        return False


def getv(key, now=None):
    """Get a key from the cache

    :param now: The current time
    :raises: KeyError if the key is not present or has expired
    :returns: The result
    """
    if now is None:
        now = time.time()
    key = Key(key)
    stats['gets'] += 1
    if key in _cache:
        record = _cache[key]
        if has_expired(record, now) or config['ignore_cache']:
            stats['expirations'] += 1
            del _cache[key]
        else:
            stats['hits'] += 1
            return record.value
    stats['misses'] += 1
    raise KeyError(key)


def setv(key, value):
    key = Key(key)
    stats['sets'] += 1
    expiration_time = time.time() + config['cache_time']
    rec = Record(expiration_time, value)
    _cache[key] = rec


def get_stats():
    return copy.copy(stats)


@contextlib.contextmanager
def maybe_bust(bust_or_not):
    previous_state = config['ignore_cache']
    config['ignore_cache'] = bust_or_not
    try:
        yield
    finally:
        config['ignore_cache'] = previous_state


def cached(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        now = time.time()
        key = tuple([func.__name__, args])
        try:
            response = getv(key, now)
        except KeyError:
            response = func(*args, **kwargs)
            setv(key, response)
        return response
    return wrapper
--------------------------------------------------------------------------------
/tests/test_cache.py:
--------------------------------------------------------------------------------
import time
import mock

from unittest import TestCase

from hacheck import cache

se = mock.sentinel


class CacheTestCase(TestCase):
    def setUp(self):
        cache.configure()

    def test_expiry(self):
        with mock.patch.object(cache, 'has_expired', return_value=False) as m:
            cache.setv(se.key, se.value)
            self.assertEqual(cache.getv(se.key), se.value)
            self.assertEqual(m.call_count, 1)
        with mock.patch.object(cache, 'has_expired', return_value=True) as m:
            cache.setv(se.key, se.value)
            self.assertRaises(KeyError, cache.getv, se.key)
            self.assertEqual(m.call_count, 1)

    def test_configure(self):
        cache.configure(cache_time=13)
        with mock.patch('time.time', return_value=1):
            cache.setv(se.key, se.value)
            with mock.patch.object(cache, 'has_expired', return_value=False) as m:
                cache.getv(se.key, time.time())
                m.assert_called_once_with(cache.Record(14, mock.ANY), 1)

    def test_stats(self):
        with mock.patch.object(cache, 'has_expired', return_value=False):
            cache.setv(se.key, se.value)
            self.assertEqual(cache.get_stats()['sets'], 1)
            self.assertEqual(cache.get_stats()['gets'], 0)
            cache.getv(se.key)
            self.assertEqual(cache.get_stats()['gets'], 1)

    def test_stats_reset(self):
        self.assertEqual(cache.get_stats()['gets'], 0)
        self.assertRaises(KeyError, cache.getv, se.key)
        self.assertEqual(cache.get_stats()['gets'], 1)
        cache.configure()
        self.assertEqual(cache.get_stats()['gets'], 0)

    def test_has_expired(self):
        self.assertEqual(False, cache.has_expired(cache.Record(2, None), 1))
        self.assertEqual(True, cache.has_expired(cache.Record(1, None), 2))

    def test_busting(self):
        with mock.patch.object(cache, 'has_expired', return_value=False):
            cache.setv(se.key, se.value)
            with cache.maybe_bust(False):
                self.assertEqual(se.value, cache.getv(se.key))
            with cache.maybe_bust(True):
                self.assertRaises(KeyError, cache.getv, se.key)

    def test_decorator(self):
        @cache.cached
        def inner(arg):
            return arg()
        m = mock.Mock(return_value=se.rv)
        self.assertEqual(se.rv, inner(m))
        self.assertEqual(se.rv, inner(m))
        m.assert_called_once_with()

    def test_decorator_expiration(self):
        @cache.cached
        def inner(arg):
            return arg()
        m = mock.Mock(return_value=se.rv)
        with mock.patch.object(cache, 'has_expired', return_value=True):
            self.assertEqual(se.rv, inner(m))
            self.assertEqual(se.rv, inner(m))
            self.assertEqual(2, m.call_count)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
[![Build Status](https://travis-ci.org/uber/hacheck.png)](https://travis-ci.org/uber/hacheck)

**hacheck** is a healthcheck-proxying service. It listens on port 3333, speaks HTTP, and has the following API:

    GET /<protocol>/<service_name>/<port>/<query>

This will check the following locations for service state:

* `/var/spool/hacheck/all`
* `/var/spool/hacheck/<service_name>`
* Depending on the value of `<protocol>`:
    * if `http`: `http://localhost:<port>/<query>`
    * if `tcp`: will attempt to connect to port `<port>` on localhost. `<query>` is currently ignored
    * if `spool`: will only check the spool state
    * if `mysql` and the `mysql_username` and `mysql_password` configuration options are set, will do a login and quit on the requested mysql port; `<query>` is ignored and no logical database is selected.

When it does query the actual service check endpoint, **hacheck** MAY cache the value of that query for some amount of time.
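
For example, with **hacheck** on its default port, a client can ask it to proxy a health check like this (an illustrative sketch; the service name `foo`, port 8080, and `status` query are hypothetical):

    from six.moves.urllib.request import urlopen

    # proxies a check of http://localhost:8080/status for service "foo",
    # subject to the spool files and the response cache described above
    resp = urlopen('http://127.0.0.1:3333/http/foo/8080/status', timeout=3)
    print(resp.getcode(), resp.read())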

**hacheck** also comes with the command-line utilities `haup`, `hadown`, and `hastatus`. These take a service name and manipulate the spool files, allowing you to pre-emptively mark a service as "up" or "down".

### Dependencies

**hacheck** is written in Python and makes extensive use of the [tornado](http://www.tornadoweb.org/en/stable/) asynchronous web framework (specifically, it uses the coroutine stuff in Tornado 3). Unit tests use nose and mock.

It runs on Python 2.6 and above, as well as Python 3.2 and above.

### Use cases

Imagine you want to take down the server `web01` for maintenance. Just SSH to it, then (as root) run `hadown all` and wait however long your HAProxy healthchecking interval is. Do your maintenance, then run `haup all` to put it back in service. So easy!

### Configuration

`hacheck` accepts a `-c` flag which should point to a YAML-formatted configuration file. Some notable properties of this file:

* `cache_time`: The duration (in seconds) for which check results may be cached
* `service_name_header`: If set, the name of a header which will be populated with the service name on HTTP checks
* `log_path`: Either the string `"stdout"`, the string `"stderr"`, or a fully-qualified path to a file to write logs to. Uses a [WatchedFileHandler](http://docs.python.org/2/library/logging.handlers.html#watchedfilehandler) and ought to play nicely with logrotate
* `mysql_username`: username to use when logging into mysql for checks
* `mysql_password`: password to use when logging into mysql for checks
* `rlimit_nofile`: sets the NOFILE rlimit. If the string `"max"`, the soft rlimit is raised to the hard rlimit; otherwise, the value is interpreted as an integer and the soft limit is set to it
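
For example, a configuration file exercising every recognized key might look like the following sketch (the values shown are illustrative, not recommended defaults):

    cache_time: 10.0
    service_name_header: X-Checked-Service
    log_path: /var/log/hacheck.log
    mysql_username: healthcheck
    mysql_password: hunter2
    rlimit_nofile: max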

### Monitoring

`hacheck` exports some useful monitoring stuff at the `/status` endpoint. It also exports a count of requests by source-IP and service name on the `/status/count` endpoint.

If the [mutornadomon](https://github.com/uber/mutornadomon) package is available, `hacheck` will import and use it, exposing standard stats about tornado to localhost at `/mutornadomon`.

### License

This work is licensed under the [MIT License](http://opensource.org/licenses/MIT), the contents of which can be found at [LICENSE.txt](LICENSE.txt).
--------------------------------------------------------------------------------
/tests/test_callables.py:
--------------------------------------------------------------------------------
import contextlib
from hacheck.compat import nested

import mock
import json
import os
from unittest import TestCase

import hacheck.haupdown
import hacheck.spool

# can't use an actual mock.sentinel because it doesn't support string ops
sentinel_service_name = 'testing_service_name'


class TestCallable(TestCase):
    @contextlib.contextmanager
    def setup_wrapper(self, args=frozenset()):
        with nested(
            mock.patch.object(hacheck, 'spool', return_value=(True, {})),
            mock.patch.object(hacheck.haupdown, 'print_s'),
            mock.patch('sys.argv', ['ignored'] + list(args))
        ) as (mock_spool, mock_print, _1):
            yield mock_spool, mock_print

    def test_basic(self):
        with self.setup_wrapper() as (spooler, _):
            spooler.status.return_value = (True, {})
            hacheck.haupdown.main('status_downed')
            spooler.configure.assert_called_once_with('/var/spool/hacheck', needs_write=False)

    def test_exit_codes(self):
        with self.setup_wrapper([sentinel_service_name]) as (spooler, mock_print):
            spooler.status.return_value = (True, {})
            self.assertEqual(0, hacheck.haupdown.main('status'))
            mock_print.assert_any_call('UP\t%s', sentinel_service_name)
            spooler.status.return_value = (False, {'reason': 'irrelevant'})
            self.assertEqual(1, hacheck.haupdown.main('status'))
            mock_print.assert_any_call('DOWN\t%s\t%s', sentinel_service_name, 'irrelevant')

    def test_up(self):
        with self.setup_wrapper([sentinel_service_name]) as (spooler, mock_print):
            hacheck.haupdown.up()
            spooler.up.assert_called_once_with(sentinel_service_name)
            self.assertEqual(mock_print.call_count, 0)

    def test_down(self):
        os.environ['SSH_USER'] = 'testyuser'
        os.environ['SUDO_USER'] = 'testyuser'
        with self.setup_wrapper([sentinel_service_name]) as (spooler, mock_print):
            hacheck.haupdown.down()
            spooler.down.assert_called_once_with(sentinel_service_name,
                                                 'testyuser')
            self.assertEqual(mock_print.call_count, 0)

    def test_down_with_reason(self):
        with self.setup_wrapper(['-r', 'something', sentinel_service_name]) as (spooler, mock_print):
            hacheck.haupdown.down()
            spooler.down.assert_called_once_with(sentinel_service_name, 'something')
            self.assertEqual(mock_print.call_count, 0)

    def test_status(self):
        with self.setup_wrapper([sentinel_service_name]) as (spooler, mock_print):
            spooler.status.return_value = (True, {})
            hacheck.haupdown.status()
            spooler.status.assert_called_once_with(sentinel_service_name)
            mock_print.assert_called_once_with("UP\t%s", sentinel_service_name)

    def test_status_downed(self):
        with self.setup_wrapper() as (spooler, mock_print):
            spooler.status_all_down.return_value = [(sentinel_service_name, {'service': sentinel_service_name, 'reason': ''})]
            self.assertEqual(hacheck.haupdown.status_downed(), 0)
            mock_print.assert_called_once_with("DOWN\t%s\t%s", sentinel_service_name, mock.ANY)

    def test_list(self):
        with self.setup_wrapper() as (spooler, mock_print):
            with mock.patch.object(hacheck.haupdown, 'urlopen') as mock_urlopen:
                mock_urlopen.return_value.read.return_value = json.dumps({
                    "seen_services": ["foo"],
                    "threshold_seconds": 10,
                })
                self.assertEqual(hacheck.haupdown.halist(), 0)
                mock_urlopen.assert_called_once_with('http://127.0.0.1:3333/recent', timeout=mock.ANY)
                mock_print.assert_called_once_with("foo")
--------------------------------------------------------------------------------
/tests/test_integration.py:
--------------------------------------------------------------------------------
# test the whole shebang

import tempfile
import shutil

import mock
import tornado.testing
import tornado.web

import hacheck.cache
import hacheck.main
import hacheck.spool


class PingHandler(tornado.web.RequestHandler):
    response_message = b'PONG'
    succeed = True

    def get(self):
        if self.succeed:
            self.write(self.response_message)
        else:
            self.set_status(503)
            self.write(b'FAIL')


class ArgBarHandler(tornado.web.RequestHandler):
    def get(self):
        self.write(self.get_argument('bar'))


class TestIntegration(tornado.testing.AsyncHTTPTestCase):
    def setUp(self):
        self.cwd = tempfile.mkdtemp()
        hacheck.spool.configure(spool_root=self.cwd)
        hacheck.cache.configure()
        super(TestIntegration, self).setUp()

    def tearDown(self):
        if self.cwd:
            shutil.rmtree(self.cwd)
        super(TestIntegration, self).tearDown()

    def get_app(self):
        hacheck_app = hacheck.main.get_app()
        hacheck_app.add_handlers(r'.*', [
            (r'/pinged', PingHandler),
            (r'/arg_bar', ArgBarHandler),
        ])
        return hacheck_app

    def test_selfie(self):
        response = self.fetch('/http/self/%d/status' % self.get_http_port())
        self.assertEqual(200, response.code)

    def test_ping(self):
        response = self.fetch('/http/test_app/%d/pinged' % self.get_http_port())
        self.assertEqual(200, response.code)
        self.assertEqual(b'PONG', response.body)

    def test_ping_fail(self):
        with mock.patch.object(PingHandler, 'succeed', False):
            response = self.fetch('/http/test_app/%d/pinged' % self.get_http_port())
            self.assertEqual(503, response.code)
            self.assertEqual(b'FAIL', response.body)

    def test_down_and_up(self):
        hacheck.spool.down('test_app', 'TESTING')
        response = self.fetch('/http/test_app/%d/pinged' % self.get_http_port())
        self.assertEqual(503, response.code)
        self.assertEqual(b'Service test_app in down state: TESTING', response.body)
        hacheck.spool.up('test_app')
        response = self.fetch('/http/test_app/%d/pinged' % self.get_http_port())
        self.assertEqual(b'PONG', response.body)

    def test_caching(self):
        hacheck.spool.up('test_app')
        response = self.fetch('/http/test_app/%d/pinged' % self.get_http_port())
        self.assertEqual(200, response.code)
        self.assertEqual(b'PONG', response.body)
        with mock.patch.object(PingHandler, 'response_message', b'dinged'):
            # first fetch should return the cached value
            response = self.fetch('/http/test_app/%d/pinged' % self.get_http_port())
            self.assertEqual(200, response.code)
            self.assertEqual(b'PONG', response.body)
            # test that sending Pragma: no-cache overrides the cached value
            response = self.fetch('/http/test_app/%d/pinged' % self.get_http_port(), headers={'Pragma': 'no-cache'})
            self.assertEqual(200, response.code)
            self.assertEqual(b'dinged', response.body)
            # subsequent requests should have the cache busted
            response = self.fetch('/http/test_app/%d/pinged' % self.get_http_port())
            self.assertEqual(200, response.code)
            self.assertEqual(b'dinged', response.body)

    def test_query_parameters_bad(self):
        response = self.fetch('/http/test_app/%d/arg_bar' % self.get_http_port())
        self.assertEqual(400, response.code)

    def test_query_parameters_good(self):
        response = self.fetch('/http/test_app/%d/arg_bar?bar=baz' % self.get_http_port())
        self.assertEqual(200, response.code)
        self.assertEqual(b'baz', response.body)
--------------------------------------------------------------------------------
/hacheck/handlers.py:
--------------------------------------------------------------------------------
import collections
import logging
import time

import tornado.ioloop
import tornado.httputil
import tornado.httpclient
import tornado.gen
import tornado.web

from . import cache
from . import checker

log = logging.getLogger('hacheck')

StatusResponse = collections.namedtuple('StatusResponse', ['code', 'remote_ip', 'ts'])

if hasattr(collections, 'Counter'):
    Counter = collections.Counter  # fast
else:
    def Counter():
        return collections.defaultdict(lambda: 0)

seen_services = {}
service_count = collections.defaultdict(Counter)
last_statuses = {}


def _reset_stats():
    seen_services.clear()
    service_count.clear()
    last_statuses.clear()


class StatusHandler(tornado.web.RequestHandler):
    def get(self):
        stats = {}
        stats['cache'] = cache.get_stats()
        stats['uptime'] = time.time() - self.settings['start_time']
        self.set_status(200)
        self.write(stats)


class ListRecentHandler(tornado.web.RequestHandler):
    def get(self):
        now = time.time()
        recency_threshold = int(self.get_argument('threshold', 10 * 60))
        response = []
        for service_name, t in seen_services.items():
            if now - t > recency_threshold:
                continue
            last_status = last_statuses.get(service_name, None)
            if last_status is not None:
                last_status = last_status._asdict()
            response.append((service_name, last_status))
        self.write({
            'seen_services': list(sorted(response)),
            'threshold_seconds': recency_threshold
        })


class ServiceCountHandler(tornado.web.RequestHandler):
    def get(self):
        self.write({'service_access_counts': dict(service_count)})


class BaseServiceHandler(tornado.web.RequestHandler):
    CHECKERS = []

    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def get(self, service_name, port, query):
        seen_services[service_name] = time.time()
        service_count[service_name][self.request.remote_ip] += 1
        with cache.maybe_bust(self.request.headers.get('Pragma', '') == 'no-cache'):
            port = int(port)
            last_message = ""
            querystr = self.request.query
            for this_checker in self.CHECKERS:
                code, message = yield this_checker(
                    service_name,
                    port,
                    query,
                    io_loop=tornado.ioloop.IOLoop.current(),
                    query_params=querystr,
                    headers=self.request.headers,
                )
                last_message = message
                if code > 200:
                    last_statuses[service_name] = StatusResponse(code, self.request.remote_ip, time.time())
                    if code in tornado.httputil.responses:
                        self.set_status(code)
                    else:
                        self.set_status(503)
                    self.write(message)
                    self.finish()
                    break
            else:
                last_statuses[service_name] = StatusResponse(200, self.request.remote_ip, time.time())
                self.set_status(200)
                self.write(last_message)
                self.finish()


class SpoolServiceHandler(BaseServiceHandler):
    CHECKERS = [checker.check_spool]


class HTTPServiceHandler(BaseServiceHandler):
    CHECKERS = [checker.check_spool, checker.check_http]


class HaproxyServiceHandler(BaseServiceHandler):
    CHECKERS = [checker.check_spool, checker.check_haproxy]


class TCPServiceHandler(BaseServiceHandler):
    CHECKERS = [checker.check_spool, checker.check_tcp]


class MySQLServiceHandler(BaseServiceHandler):
    CHECKERS = [checker.check_spool, checker.check_mysql]


class RedisSentinelServiceHandler(BaseServiceHandler):
    CHECKERS = [checker.check_spool, checker.check_redis_sentinel]


class RedisInfoServiceHandler(BaseServiceHandler):
    CHECKERS = [checker.check_spool, checker.check_redis_info]


class SentinelInfoServiceHandler(BaseServiceHandler):
    CHECKERS = [checker.check_spool, checker.check_sentinel_info]
--------------------------------------------------------------------------------
/hacheck/haupdown.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

from __future__ import print_function

import contextlib
import json
import optparse
import os
import pwd
import sys

import six
from six.moves.urllib.request import urlopen

import hacheck.spool


def up():
    return main('up')


def down():
    return main('down')


def halist():
    return main('list')


def status():
    return main('status')


def status_downed():
    return main('status_downed')


def print_s(fmt_string, *formats):
    """Print function split out for mocking"""
    print(fmt_string % formats)


def main(default_action='list'):
    ACTIONS = ('up', 'down', 'status', 'status_downed', 'list')
    parser = optparse.OptionParser(usage='%prog [options] service_name(s)')
    parser.add_option(
        '--spool-root',
        default='/var/spool/hacheck',
        help='Root for spool for service states (default %default)'
    )
    parser.add_option(
        '-a',
        '--action',
        type='choice',
        choices=ACTIONS,
        default=default_action,
        help='Action (one of %s, default %%default)' % ', '.join(ACTIONS)
    )
    parser.add_option(
        '-r',
        '--reason',
        type=str,
        default="",
        help='Reason string when setting down'
    )
    parser.add_option(
        '-p',
        '--port',
        type=int,
        default=3333,
        help='Port that the hacheck daemon is running on (default %default)'
    )
    opts, args = parser.parse_args()

    nonhumans = set()
    try:
        with open('/etc/nonhumans', 'r') as f:
            for line in f:
                unix_username = line.split('#')[0].strip()
                if unix_username:
                    nonhumans.add(unix_username)
    except Exception:
        pass
    if opts.action == 'down' and not opts.reason:
        if 'SUDO_USER' in os.environ:
            opts.reason = os.environ['SUDO_USER']
        elif 'SSH_USER' in os.environ:
            opts.reason = os.environ['SSH_USER']
        else:
            opts.reason = pwd.getpwuid(os.geteuid()).pw_name
        if opts.reason in nonhumans:
            print_s('please use --reason option to tell us who you REALLY are')
            return 1

    if opts.action in ('status', 'up', 'down'):
        if not args:
            parser.error('Expected args for action %s' % (opts.action))
        service_names = args
    else:
        if args:
            parser.error('Unexpected args for action %s: %r' % (opts.action, args))

    if opts.action == 'list':
        with contextlib.closing(urlopen(
            'http://127.0.0.1:%d/recent' % opts.port,
            timeout=3
        )) as f:
            resp = json.load(f)
        for s in sorted(resp['seen_services']):
            if isinstance(s, six.string_types):
                print_s(s)
            else:
                service_name, last_response = s
                print_s('%s last_response=%s', service_name, json.dumps(last_response))
        return 0
    elif opts.action == 'up':
        hacheck.spool.configure(opts.spool_root, needs_write=True)
        for service_name in service_names:
            hacheck.spool.up(service_name)
        return 0
    elif opts.action == 'down':
        hacheck.spool.configure(opts.spool_root, needs_write=True)
        for service_name in service_names:
            hacheck.spool.down(service_name, opts.reason)
        return 0
    elif opts.action == 'status_downed':
        hacheck.spool.configure(opts.spool_root, needs_write=False)
        for service_name, info in hacheck.spool.status_all_down():
            print_s('DOWN\t%s\t%s', service_name, info.get('reason', ''))
        return 0
    else:
        hacheck.spool.configure(opts.spool_root, needs_write=False)
        rv = 0
        for service_name in service_names:
            status, info = hacheck.spool.status(service_name)
            if status:
                print_s('UP\t%s', service_name)
            else:
                print_s('DOWN\t%s\t%s', service_name, info.get('reason', ''))
                rv = 1
        return rv


if __name__ == '__main__':
    sys.exit(main())
--------------------------------------------------------------------------------
/hacheck/main.py:
--------------------------------------------------------------------------------
import logging
import logging.handlers
import optparse
import signal
import time
import sys
import resource

import tornado.ioloop
import tornado.httpserver
import tornado.web
from tornado.log import access_log

from . import cache
from . import config
from . import handlers
from . import spool

try:
    from mutornadomon.config import initialize_mutornadomon
except ImportError:
    initialize_mutornadomon = None


def log_request(handler):
    # log all requests at DEBUG, regardless of status code
    request_time = 1000.0 * handler.request.request_time()
    access_log.debug("%d %s %.2fms", handler.get_status(),
                     handler._request_summary(), request_time)


def get_app():
    return tornado.web.Application([
        (r'/http/([a-zA-Z0-9_-]+)/([0-9]+)/(.*)', handlers.HTTPServiceHandler),
        (r'/tcp/([a-zA-Z0-9_-]+)/([0-9]+)/?(.*)', handlers.TCPServiceHandler),
        (r'/mysql/([a-zA-Z0-9_-]+)/([0-9]+)/?(.*)', handlers.MySQLServiceHandler),
        (r'/redis/([a-zA-Z0-9_-]+)/([0-9]+)/?(.*)', handlers.RedisSentinelServiceHandler),
        (r'/redis-info/([a-zA-Z0-9_-]+)/([0-9]+)/?(.*)', handlers.RedisInfoServiceHandler),
        (r'/sentinel/([a-zA-Z0-9_-]+)/([0-9]+)/?(.*)', handlers.RedisSentinelServiceHandler),
        (r'/sentinel-info/([a-zA-Z0-9_-]+)/([0-9]+)/?(.*)', handlers.SentinelInfoServiceHandler),
        (r'/spool/([a-zA-Z0-9_-]+)/([0-9]+)/?(.*)', handlers.SpoolServiceHandler),
        (r'/haproxy/([a-zA-Z0-9_-]+)/([0-9]+)/?(.*)', handlers.HaproxyServiceHandler),
        (r'/recent', handlers.ListRecentHandler),
        (r'/status/count', handlers.ServiceCountHandler),
        (r'/status', handlers.StatusHandler),
    ], start_time=time.time(), log_function=log_request)


def setrlimit_nofile(soft_target):
    current_soft, current_hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    if soft_target == 'max':
        desired_fd_limit = (current_hard, current_hard)
    elif soft_target > current_hard:
        raise ValueError('Targeted NOFILE rlimit %d is greater than hard limit %d' % (soft_target, current_hard))
    else:
        desired_fd_limit = (soft_target, current_hard)
    resource.setrlimit(resource.RLIMIT_NOFILE, desired_fd_limit)


def main():
    parser = optparse.OptionParser()
    parser.add_option(
        '-c',
        '--config-file',
        default=None,
        help='Path to a YAML config file'
    )
    parser.add_option(
        '-p',
        '--port',
        default=[],
        type=int,
        action='append',
        help='Port to listen on. May be repeated. If not passed, defaults to :3333.'
    )
    parser.add_option(
        '-B',
        '--bind-address',
        default='0.0.0.0',
        help='Address to listen on. Defaults to %default'
    )
    parser.add_option(
        '--spool-root',
        default='/var/spool/hacheck',
        help='Root for spool for service states (default %default)'
    )
    parser.add_option(
        '-v',
        '--verbose',
        default=False,
        action='store_true'
    )
    opts, args = parser.parse_args()
    if opts.config_file is not None:
        config.load_from(opts.config_file)

    if not opts.port:
        opts.port = [3333]
    if config.config['rlimit_nofile'] is not None:
        setrlimit_nofile(config.config['rlimit_nofile'])

    # set up logging
    log_path = config.config['log_path']
    level = logging.DEBUG if opts.verbose else logging.WARNING
    if log_path == 'stdout':
        handler = logging.StreamHandler(sys.stdout)
    elif log_path == 'stderr':
        handler = logging.StreamHandler(sys.stderr)
    else:
        handler = logging.handlers.WatchedFileHandler(log_path)
    fmt = logging.Formatter(logging.BASIC_FORMAT, None)
    handler.setFormatter(fmt)
    logging.getLogger().addHandler(handler)
    logging.getLogger().setLevel(level)

    # application stuff
    cache.configure(cache_time=config.config['cache_time'])
    spool.configure(spool_root=opts.spool_root)
    application = get_app()
    ioloop = tornado.ioloop.IOLoop.instance()
    server = tornado.httpserver.HTTPServer(application, io_loop=ioloop)

    if initialize_mutornadomon is not None:
        mutornadomon_collector = initialize_mutornadomon(application, io_loop=ioloop)
    else:
        mutornadomon_collector = None

    def stop(*args):
        if mutornadomon_collector is not None:
            mutornadomon_collector.stop()
        ioloop.stop()

    for port in opts.port:
        server.listen(port, opts.bind_address)
    for sig in (signal.SIGTERM, signal.SIGQUIT, signal.SIGINT):
        signal.signal(sig, stop)
    ioloop.start()
    return 0


if __name__ == '__main__':
    sys.exit(main())
--------------------------------------------------------------------------------
/hacheck/mysql.py:
--------------------------------------------------------------------------------
"""clean-room implementation of a mysql client supporting both *connect* and *quit* operations"""
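
# A rough sketch of the exchange implemented below (inferred from this module,
# not from any external specification): the server opens with a greeting
# packet whose header byte is 0x0a and which carries a salt; the client
# replies with a login packet whose password field is
#     SHA1(password) XOR SHA1(salt + SHA1(SHA1(password)))
# (see _stupid_hash_password below); once the server acknowledges, the
# client quits and disconnects.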

import datetime
import socket
import struct
import sys
import time
from hashlib import sha1

from . import compat

import tornado.gen
import tornado.ioloop
import tornado.iostream


def _sxor(lhs, rhs):
    if sys.version_info > (3, 0):
        return b''.join(compat.bchr(a ^ b) for a, b in zip(lhs, rhs))
    else:
        return b''.join(compat.bchr(ord(a) ^ ord(b)) for a, b in zip(lhs, rhs))


def _stupid_hash_password(salt, password):
    password = password.encode('utf-8')
    salt = salt.encode('utf-8')
    return _sxor(
        sha1(password).digest(),
        sha1(
            salt + sha1(sha1(password).digest()).digest()
        ).digest()
    )


def _read_lenc(buf, offset=0):
    # length-encoded integers: one byte below 0xfb, otherwise a marker byte
    # followed by a 2-, 3-, or 8-byte little-endian value (the 0xfc/0xfd/0xfe
    # branches are reconstructed to match tests/test_mysql.py)
    first = struct.unpack('B', buf[offset:offset + 1])[0]
    if first < 0xfb:
        return first, offset + 1
    elif first == 0xfc:
        return struct.unpack('<H', buf[offset + 1:offset + 3])[0], offset + 3
    elif first == 0xfd:
        return struct.unpack('<I', buf[offset + 1:offset + 4] + b'\x00')[0], offset + 4
    else:
        return struct.unpack('<Q', buf[offset + 1:offset + 9])[0], offset + 9


# NOTE: roughly fifty lines of response-parsing code were lost here when this
# file was extracted; the class below is a hedged reconstruction around the
# surviving fragment (the header/0xf0 check and __repr__). The class name and
# the body-field parsing are assumptions, not original code.
class MySQLResponse(object):
    def __init__(self, packet):
        self.packet = packet
        # the first byte of the packet body identifies the response type
        self.header = struct.unpack('B', packet[0:1])[0]
        self.response_type = 'ERR' if self.header == 0xff else 'OK'  # assumed
        self.message = packet[1:]  # assumed
        if self.header > 0xf0:
            self.OK = False
        else:
            self.OK = True

    def __repr__(self):
        return '%s(%s)<%s>' % (self.__class__.__name__, self.response_type, self.message)


class MySQLClient(object):
    def __init__(self, host='127.0.0.1', port=3306, global_timeout=0, io_loop=None, timeout_callback=None):
        self.host = host
        self.port = port
        self.global_timeout = global_timeout
        self.timeout_callback = timeout_callback
        if io_loop is None:
            io_loop = tornado.ioloop.IOLoop.current()
        self.io_loop = io_loop
        self.socket = None
        self.stream = None
        self.start = 0
        self.timeout = None
        self.connected = False
        self.sequence = 1

    @tornado.gen.coroutine
    def _connect_socket(self):
        self.start = time.time()
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        self.stream = tornado.iostream.IOStream(s, io_loop=self.io_loop)
        if self.global_timeout:
            self.timeout = self.io_loop.add_timeout(datetime.timedelta(seconds=self.global_timeout), self._timed_out)
        yield tornado.gen.Task(self.stream.connect, (self.host, self.port))
        self.connected = True

    def _timed_out(self):
        now = time.time()
        try:
            self.stream.close()
        except Exception:
            pass
        if self.timeout_callback is not None:
            self.timeout_callback(now - self.start)

    @tornado.gen.coroutine
    def connect(self, username, password):
        yield self._connect_socket()
        connection_response = yield self.read_response()
        assert connection_response.header == 0x0a
        connection_packet = struct.pack(
            '