├── cronitor
│   ├── tests
│   │   ├── __init__.py
│   │   ├── cronitor.yaml
│   │   ├── test_00.py
│   │   ├── test_config.py
│   │   ├── test_pings.py
│   │   ├── test_integration.py
│   │   └── test_monitor.py
│   ├── __main__.py
│   ├── __init__.py
│   ├── celery.py
│   └── monitor.py
├── requirements.txt
├── MANIFEST
├── setup.cfg
├── setup.py
├── .github
│   └── workflows
│       └── test.yml
├── LICENSE
├── .gitignore
└── README.md
/cronitor/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | requests==2.32.4 2 | pyyaml==6.0.1 3 | humanize==3.13.1 4 | -------------------------------------------------------------------------------- /MANIFEST: -------------------------------------------------------------------------------- 1 | # file GENERATED by distutils, do NOT edit 2 | setup.cfg 3 | setup.py 4 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [wheel] 2 | universal = 1 3 | 4 | [bdist_rpm] 5 | requires = python requests -------------------------------------------------------------------------------- /cronitor/tests/cronitor.yaml: -------------------------------------------------------------------------------- 1 | jobs: 2 | replenishment-report: 3 | schedule: '0 * * * *' 4 | data-warehouse-exports: 5 | schedule: '0 0 * * *' 6 | welcome-email: 7 | schedule: 'every 10 minutes' 8 | 9 | checks: 10 | cronitor-homepage: 11 | request: 12 | url: 'https://cronitor.io' 13 | assertions: 14 | - 'response.time < 2s' 15 | 16 | heartbeats: 17 | production-deploy: 18 | notify: 19 | alerts: 20 | - default 21 | events: 22 | complete: true -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup, find_packages 2 | 3 | with open("README.md", "r", encoding="utf-8") as fh: 4 | long_description = fh.read() 5 | 6 | setup( 7 | name='cronitor', 8 | version='4.9.0', 9 | packages=find_packages(), 10 | url='https://github.com/cronitorio/cronitor-python', 11 | license='MIT License', 12 | author='August Flanagan', 13 | author_email='august@cronitor.io', 14 | description='A lightweight Python client for Cronitor.', 15 | long_description = long_description, 16 | long_description_content_type = 'text/markdown', 17 | install_requires=[ 18 | 'requests', 19 | 'pyyaml', 20 | 'humanize', 21 | 'urllib3' 22 | ], 23 | entry_points=dict(console_scripts=['cronitor = cronitor.__main__:main']) 24 | ) 25 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Test 3 | 4 | on: 5 | push: 6 | branches: 7 | - master 8 | pull_request: 9 | branches: 10 | - master 11 | 12 | jobs: 13 | build: 14 | runs-on: ubuntu-latest 15 | strategy: 16 | fail-fast: false 17 | matrix: 18 | python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"] 19 | 20 | steps: 21 | - uses: actions/checkout@v4 22 | - name: Set up Python ${{ matrix.python-version }} 23 | uses: actions/setup-python@v5 24 | with: 25 | python-version: ${{ matrix.python-version }} 26 | - name: Install Dependencies 27 | run: pip install --upgrade pip && pip install -r requirements.txt 28 | - name: 
Run Tests 29 | run: | 30 | pip install pytest 31 | pytest 32 | -------------------------------------------------------------------------------- /cronitor/tests/test_00.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | import cronitor 3 | import unittest 4 | from unittest.mock import call, patch, ANY 5 | import time 6 | import cronitor 7 | 8 | FAKE_API_KEY = 'cb54ac4fd16142469f2d84fc1bbebd84XXXDEADXXX' 9 | YAML_PATH = './cronitor/tests/cronitor.yaml' 10 | 11 | cronitor.api_key = FAKE_API_KEY 12 | cronitor.timeout = 10 13 | 14 | class SyncTests(unittest.TestCase): 15 | 16 | def setUp(self): 17 | return super().setUp() 18 | 19 | def test_00_monitor_attributes_are_put(self): 20 | # This test will run first, test that attributes are synced correctly, and then undo the global mock 21 | 22 | with patch('cronitor.Monitor.put') as mock_put: 23 | time.sleep(2) 24 | calls = [call([{'key': 'ping-decorator-test', 'name': 'Ping Decorator Test'}])] 25 | mock_put.assert_has_calls(calls) 26 | 27 | @cronitor.job('ping-decorator-test', attributes={'name': 'Ping Decorator Test'}) 28 | def function_call_with_attributes(self): 29 | return 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Permission is hereby granted, free of charge, to any person obtaining a copy 4 | of this software and associated documentation files (the "Software"), to deal 5 | in the Software without restriction, including without limitation the rights 6 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 | copies of the Software, and to permit persons to whom the Software is 8 | furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all 11 | copies or substantial portions of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 19 | SOFTWARE. 
20 | -------------------------------------------------------------------------------- /cronitor/tests/test_config.py: -------------------------------------------------------------------------------- 1 | import yaml 2 | import cronitor 3 | import unittest 4 | from unittest.mock import call, patch, ANY 5 | 6 | import cronitor 7 | 8 | FAKE_API_KEY = 'cb54ac4fd16142469f2d84fc1bbebd84XXXDEADXXX' 9 | YAML_PATH = './cronitor/tests/cronitor.yaml' 10 | 11 | cronitor.api_key = FAKE_API_KEY 12 | cronitor.timeout = 10 13 | 14 | with open(YAML_PATH, 'r') as conf: 15 | YAML_DATA = yaml.safe_load(conf) 16 | 17 | class CronitorTests(unittest.TestCase): 18 | 19 | def setUp(self): 20 | return super().setUp() 21 | 22 | def test_read_config(self): 23 | data = cronitor.read_config(YAML_PATH, output=True) 24 | self.assertIn('jobs', data) 25 | self.assertIn('checks', data) 26 | self.assertIn('heartbeats', data) 27 | 28 | @patch('cronitor.Monitor.put', return_value=YAML_DATA) 29 | def test_validate_config(self, mock): 30 | cronitor.config = YAML_PATH 31 | cronitor.validate_config() 32 | mock.assert_called_once_with(monitors=YAML_DATA, rollback=True, format='yaml') 33 | 34 | @patch('cronitor.Monitor.put') 35 | def test_apply_config(self, mock): 36 | cronitor.config = YAML_PATH 37 | cronitor.apply_config() 38 | mock.assert_called_once_with(monitors=YAML_DATA, rollback=False, format='yaml') 39 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Created by .ignore support plugin (hsz.mobi) 2 | ### Python template 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.py[cod] 6 | *$py.class 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | env/ 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *,cover 49 | .hypothesis/ 50 | 51 | # Translations 52 | *.mo 53 | *.pot 54 | 55 | # Django stuff: 56 | *.log 57 | local_settings.py 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # dotenv 85 | .env 86 | 87 | # virtualenv 88 | .venv 89 | venv/ 90 | ENV/ 91 | 92 | # Spyder project settings 93 | .spyderproject 94 | 95 | # Rope project settings 96 | .ropeproject 97 | .idea 98 | 99 | #VSCode 100 | .vscode -------------------------------------------------------------------------------- /cronitor/__main__.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import sys 4 | 5 | from .monitor import Monitor 6 | 7 | 8 | def main(): 9 | parser = argparse.ArgumentParser(prog="cronitor", 10 | description='Send status messages to Cronitor ping API.') # noqa 11 | parser.add_argument('--apiKey', '-a', type=str, 12 | default=os.getenv('CRONITOR_API_KEY'), 13 | help='Auth Key from Account page') 14 | parser.add_argument('--id', '-i', type=str, 15 | default=os.getenv('CRONITOR_ID', os.getenv('CRONITOR_CODE')), 16 | help='Monitor Id to take action upon') 17 | # alias for id. deprecated. 18 | parser.add_argument('--code', '-c', type=str, 19 | default=os.getenv('CRONITOR_CODE'), 20 | help='DEPRECATED: Code for Monitor to take action upon. 
Alias of Id.') 21 | parser.add_argument('--msg', '-m', type=str, default='', 22 | help='Optional message to send with ping/fail') 23 | 24 | group = parser.add_mutually_exclusive_group(required=True) 25 | 26 | group.add_argument('--run', '-r', action='store_true', 27 | help='Send a run event') 28 | group.add_argument('--complete', '-C', action='store_true', 29 | help='Send a complete event') 30 | group.add_argument('--fail', '-f', action='store_true', 31 | help='Send a fail event') 32 | group.add_argument('--ok', '-o', action='store_true', 33 | help='Send an ok event') 34 | group.add_argument('--pause', '-p', type=str, default=24, 35 | help='Pause a monitor') 36 | 37 | args = parser.parse_args() 38 | 39 | if args.id is None and args.code is None: 40 | print('A monitorId must be supplied using the --id flag or setting the CRONITOR_ID environment variable.') 41 | parser.print_help() 42 | sys.exit(1) 43 | 44 | monitor = Monitor(args.id or args.code, api_key=args.apiKey) 45 | 46 | if args.run: 47 | ret = monitor.ping(state='run', message=args.msg) 48 | elif args.complete: 49 | ret = monitor.ping(state='complete', message=args.msg) 50 | elif args.fail: 51 | ret = monitor.ping(state='fail', message=args.msg) 52 | elif args.ok: 53 | ret = monitor.ping(state='ok', message=args.msg) 54 | elif args.pause: 55 | ret = monitor.pause(args.pause) 56 | else: 57 | ret = monitor.ping(message=args.msg) 58 | return ret 59 | 60 | 61 | if __name__ == '__main__': 62 | main() 63 | -------------------------------------------------------------------------------- /cronitor/tests/test_pings.py: -------------------------------------------------------------------------------- 1 | import os 2 | import unittest 3 | from unittest.mock import patch, ANY, call 4 | from unittest.mock import MagicMock 5 | import cronitor 6 | import pytest 7 | 8 | # a reserved monitorkey for running integration tests against cronitor.link 9 | FAKE_KEY = 'd3x0c1' 10 | FAKE_API_KEY = 'ping-api-key' 11 | 12 | class MonitorPingTests(unittest.TestCase): 13 | 14 | def setUp(self): 15 | cronitor.api_key = FAKE_API_KEY 16 | 17 | def test_endpoints(self): 18 | monitor = cronitor.Monitor(key=FAKE_KEY) 19 | 20 | self.assertTrue(monitor.ping()) 21 | 22 | states = ['run', 'complete', 'fail', 'ok'] 23 | for state in states: 24 | self.assertTrue(monitor.ping(state=state)) 25 | 26 | 27 | @patch('cronitor.Monitor._req.get') 28 | def test_with_all_params(self, ping): 29 | 30 | monitor = cronitor.Monitor(FAKE_KEY, env='staging') 31 | 32 | params = { 33 | 'state': 'run', 34 | 'host': 'foo', 35 | 'message': 'test message', 36 | 'series': 'abc', 37 | 'metrics': { 38 | 'duration': 100, 39 | 'count': 5, 40 | 'error_count':2 41 | } 42 | } 43 | 44 | monitor.ping(**params) 45 | del params['metrics'] 46 | params['metric'] = [ANY, ANY, ANY,] 47 | params['env'] = monitor.env 48 | params['stamp'] = ANY 49 | 50 | ping.assert_called_once_with( 51 | headers={ 52 | 'User-Agent': 'cronitor-python', 53 | }, 54 | params=params, 55 | timeout=5, 56 | url='https://cronitor.link/p/{}/{}'.format(FAKE_API_KEY, FAKE_KEY)) 57 | 58 | 59 | def test_convert_metrics_hash(self): 60 | monitor = cronitor.Monitor(FAKE_KEY) 61 | clean = monitor._clean_params({ 'metrics': { 62 | 'duration': 100, 63 | 'count': 500, 64 | 'error_count': 20 65 | }}) 66 | self.assertListEqual(sorted(clean['metric']), sorted(['count:500', 'duration:100', 'error_count:20' ])) 67 | 68 | 69 | class PingDecoratorTests(unittest.TestCase): 70 | 71 | def setUp(self): 72 | cronitor.api_key = 
FAKE_API_KEY 73 | 74 | @patch('cronitor.Monitor.ping') 75 | def test_ping_wraps_function_success(self, mocked_ping): 76 | calls = [call(state='run', series=ANY), call(state='complete', series=ANY, metrics={'duration': ANY}, message=ANY)] 77 | self.function_call() 78 | mocked_ping.assert_has_calls(calls) 79 | 80 | @patch('cronitor.Monitor.ping') 81 | def test_ping_wraps_function_raises_exception(self, mocked_ping): 82 | calls = [call(state='run', series=ANY), call(state='fail', series=ANY, metrics={'duration': ANY}, message=ANY)] 83 | self.assertRaises(Exception, lambda: self.error_function_call()) 84 | mocked_ping.assert_has_calls(calls) 85 | 86 | 87 | @patch('cronitor.Monitor.ping') 88 | @patch('cronitor.Monitor.__init__') 89 | def test_ping_with_non_default_env(self, mocked_monitor, mocked_ping): 90 | mocked_monitor.return_value = None 91 | self.staging_env_function_call() 92 | mocked_monitor.assert_has_calls([call('ping-decorator-test', env='staging')]) 93 | 94 | @cronitor.job('ping-decorator-test') 95 | def function_call(self): 96 | return 97 | 98 | @cronitor.job('ping-decorator-test') 99 | def error_function_call(self): 100 | raise Exception 101 | 102 | @cronitor.job('ping-decorator-test', env='staging') 103 | def staging_env_function_call(self): 104 | return 105 | 106 | 107 | 108 | 109 | -------------------------------------------------------------------------------- /cronitor/__init__.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | from datetime import datetime 4 | from functools import wraps 5 | import sys 6 | import yaml 7 | from yaml.loader import SafeLoader 8 | import time 9 | import atexit 10 | import threading 11 | 12 | from .monitor import Monitor, YAML 13 | 14 | logger = logging.getLogger(__name__) 15 | logger.setLevel(logging.INFO) 16 | 17 | # configuration variables 18 | api_key = os.getenv('CRONITOR_API_KEY', None) 19 | api_version = os.getenv('CRONITOR_API_VERSION', None) 20 | environment = os.getenv('CRONITOR_ENVIRONMENT', None) 21 | config = os.getenv('CRONITOR_CONFIG', None) 22 | timeout = os.getenv('CRONITOR_TIMEOUT', None) 23 | if timeout is not None: 24 | timeout = int(timeout) 25 | 26 | celerybeat_only = False 27 | 28 | # monitor attributes can be synced at process startup 29 | monitor_attributes = [] 30 | 31 | # this is a pointer to the module object instance itself. 
32 | this = sys.modules[__name__] 33 | if this.config: 34 | this.read_config() # set config vars contained within 35 | 36 | class MonitorNotFound(Exception): 37 | pass 38 | 39 | class ConfigValidationError(Exception): 40 | pass 41 | 42 | class APIValidationError(Exception): 43 | pass 44 | 45 | class AuthenticationError(Exception): 46 | pass 47 | 48 | class APIError(Exception): 49 | pass 50 | 51 | class State(object): 52 | OK = 'ok' 53 | RUN = 'run' 54 | COMPLETE = 'complete' 55 | FAIL = 'fail' 56 | 57 | # include_output is deprecated in favor of log_output and can be removed in 5.0 release 58 | def job(key, env=None, log_output=True, include_output=True, attributes=None): 59 | 60 | if type(attributes) is dict: 61 | attributes['key'] = key 62 | monitor_attributes.append(attributes) 63 | 64 | def wrapper(func): 65 | @wraps(func) 66 | def wrapped(*args, **kwargs): 67 | start = datetime.now().timestamp() 68 | 69 | monitor = Monitor(key, env=env) 70 | # use start as the series param to match run/fail/complete correctly 71 | monitor.ping(state=State.RUN, series=start) 72 | try: 73 | out = func(*args, **kwargs) 74 | except Exception as e: 75 | duration = datetime.now().timestamp() - start 76 | monitor.ping(state=State.FAIL, message=str(e), metrics={'duration': duration}, series=start) 77 | raise e 78 | 79 | duration = datetime.now().timestamp() - start 80 | message = str(out) if all([log_output, include_output]) else None 81 | monitor.ping(state=State.COMPLETE, message=message, metrics={'duration': duration}, series=start) 82 | return out 83 | 84 | return wrapped 85 | return wrapper 86 | 87 | def generate_config(): 88 | config = this.config or './cronitor.yaml' 89 | with open(config, 'w') as conf: 90 | conf.writelines(Monitor.as_yaml()) 91 | 92 | def validate_config(): 93 | return apply_config(rollback=True) 94 | 95 | def apply_config(rollback=False): 96 | if not this.config: 97 | raise ConfigValidationError("Must set a path to config file e.g. cronitor.config = './cronitor.yaml'") 98 | 99 | config = read_config(output=True) 100 | try: 101 | monitors = Monitor.put(monitors=config, rollback=rollback, format=YAML) 102 | job_count = len(monitors.get('jobs', [])) 103 | check_count = len(monitors.get('checks', [])) 104 | heartbeat_count = len(monitors.get('heartbeats', [])) 105 | total_count = sum([job_count, check_count, heartbeat_count]) 106 | logger.info('{} monitor{} {}'.format(total_count, 's' if total_count != 1 else '', 'validated.' if rollback else 'synced.',)) 107 | return True 108 | except (yaml.YAMLError, ConfigValidationError, APIValidationError, APIError, AuthenticationError) as e: 109 | logger.error(e) 110 | return False 111 | 112 | def read_config(path=None, output=False): 113 | this.config = path or this.config 114 | if not this.config: 115 | raise ConfigValidationError("Must include a path to config file e.g. 
cronitor.read_config('./cronitor.yaml')") 116 | 117 | with open(this.config, 'r') as conf: 118 | data = yaml.load(conf, Loader=SafeLoader) 119 | if output: 120 | return data 121 | 122 | def sync_monitors(wait=1): 123 | global monitor_attributes 124 | if wait > 0: 125 | time.sleep(wait) 126 | 127 | if len(monitor_attributes): 128 | Monitor.put(monitor_attributes) 129 | monitor_attributes = [] 130 | 131 | try: 132 | sync 133 | except NameError: 134 | sync = threading.Thread(target=sync_monitors) 135 | sync.start() 136 | atexit.register(sync.join) 137 | -------------------------------------------------------------------------------- /cronitor/tests/test_integration.py: -------------------------------------------------------------------------------- 1 | """ 2 | Integration tests that make real API calls to Cronitor. 3 | 4 | These tests are skipped by default unless CRONITOR_TEST_API_KEY environment 5 | variable is set. They test against the real Cronitor API to verify behavior. 6 | 7 | Usage: 8 | export CRONITOR_TEST_API_KEY=your_api_key_here 9 | python -m pytest cronitor/tests/test_integration.py -v 10 | # or 11 | python -m unittest cronitor.tests.test_integration -v 12 | """ 13 | 14 | import os 15 | import unittest 16 | import cronitor 17 | 18 | 19 | # Check if integration tests should run 20 | INTEGRATION_API_KEY = os.getenv('CRONITOR_TEST_API_KEY') 21 | SKIP_INTEGRATION = not INTEGRATION_API_KEY 22 | SKIP_REASON = "Set CRONITOR_TEST_API_KEY environment variable to run integration tests" 23 | 24 | 25 | @unittest.skipIf(SKIP_INTEGRATION, SKIP_REASON) 26 | class MonitorListIntegrationTests(unittest.TestCase): 27 | """Integration tests for Monitor.list() against real API""" 28 | 29 | @classmethod 30 | def setUpClass(cls): 31 | """Set up API key for all tests""" 32 | cronitor.api_key = INTEGRATION_API_KEY 33 | 34 | def test_list_all_monitors(self): 35 | """Test listing all monitors (first page)""" 36 | monitors = cronitor.Monitor.list() 37 | 38 | # Should return a list (may be empty for new accounts) 39 | self.assertIsInstance(monitors, list) 40 | 41 | # If there are monitors, verify structure 42 | if len(monitors) > 0: 43 | monitor = monitors[0] 44 | self.assertIsInstance(monitor, cronitor.Monitor) 45 | self.assertIsNotNone(monitor.data.key) 46 | self.assertIsNotNone(monitor.data.name) 47 | print(f"\n✓ Found {len(monitors)} monitors on first page (default pageSize=100)") 48 | print(f" First monitor: {monitor.data.name} ({monitor.data.key})") 49 | 50 | def test_list_all_monitors_auto_paginate(self): 51 | """Test listing all monitors with auto_paginate""" 52 | monitors_all = cronitor.Monitor.list(auto_paginate=True) 53 | 54 | self.assertIsInstance(monitors_all, list) 55 | 56 | # Check if there were multiple pages 57 | monitors_page1 = cronitor.Monitor.list(pageSize=100) 58 | if len(monitors_all) > 100: 59 | print(f"\n✓ Auto-paginate fetched all {len(monitors_all)} monitors across multiple pages") 60 | print(f" First page had {len(monitors_page1)}, total is {len(monitors_all)}") 61 | else: 62 | print(f"\n✓ Auto-paginate fetched {len(monitors_all)} monitors (all fit in one page)") 63 | 64 | def test_list_with_pagination(self): 65 | """Test listing monitors with specific page size""" 66 | monitors = cronitor.Monitor.list(pageSize=5) 67 | 68 | self.assertIsInstance(monitors, list) 69 | # Should return at most 5 monitors 70 | self.assertLessEqual(len(monitors), 5) 71 | print(f"\n✓ Pagination works, got {len(monitors)} monitors (max 5)") 72 | 73 | def test_list_with_filter(self): 74 | """Test listing 
monitors with type filter""" 75 | monitors = cronitor.Monitor.list(type='job') 76 | 77 | self.assertIsInstance(monitors, list) 78 | 79 | # Verify all returned monitors are jobs 80 | for monitor in monitors: 81 | self.assertEqual(monitor.data.type, 'job') 82 | 83 | print(f"\n✓ Filter works, got {len(monitors)} job monitors") 84 | 85 | def test_list_with_search(self): 86 | """Test listing monitors with search parameter""" 87 | monitors = cronitor.Monitor.list(search='test job') 88 | 89 | self.assertIsInstance(monitors, list) 90 | 91 | # Should return monitors matching search term 92 | if len(monitors) > 0: 93 | print(f"\n✓ Search works, found {len(monitors)} monitors matching 'test job'") 94 | for monitor in monitors[:3]: # Show first 3 95 | print(f" - {monitor.data.name} ({monitor.data.key})") 96 | else: 97 | print(f"\n✓ Search works, found 0 monitors matching 'test job'") 98 | 99 | def test_list_specific_keys(self): 100 | """Test listing specific monitors by key""" 101 | # First get some monitors to test with 102 | all_monitors = cronitor.Monitor.list(pageSize=2) 103 | 104 | if len(all_monitors) == 0: 105 | self.skipTest("No monitors found in account") 106 | 107 | # Get keys to fetch 108 | keys_to_fetch = [m.data.key for m in all_monitors[:min(2, len(all_monitors))]] 109 | 110 | # Fetch them specifically 111 | monitors = cronitor.Monitor.list(keys_to_fetch) 112 | 113 | self.assertEqual(len(monitors), len(keys_to_fetch)) 114 | returned_keys = [m.data.key for m in monitors] 115 | self.assertEqual(set(returned_keys), set(keys_to_fetch)) 116 | 117 | print(f"\n✓ Fetched specific monitors: {', '.join(keys_to_fetch)}") 118 | 119 | def test_monitor_data_structure(self): 120 | """Test that monitor data structure is correct""" 121 | monitors = cronitor.Monitor.list(pageSize=1) 122 | 123 | if len(monitors) == 0: 124 | self.skipTest("No monitors found in account") 125 | 126 | monitor = monitors[0] 127 | 128 | # Test basic fields exist 129 | self.assertIsNotNone(monitor.data.key) 130 | self.assertIsNotNone(monitor.data.name) 131 | self.assertIsNotNone(monitor.data.type) 132 | 133 | # Test nested attribute access works 134 | self.assertIsNotNone(monitor.data.attributes) 135 | self.assertIsNotNone(monitor.data.attributes.code) 136 | 137 | # Test pretty printing works 138 | json_str = str(monitor.data) 139 | self.assertIn(monitor.data.key, json_str) 140 | self.assertIn('\n', json_str) # Pretty formatted 141 | 142 | print(f"\n✓ Monitor data structure correct") 143 | print(f" Key: {monitor.data.key}") 144 | print(f" Name: {monitor.data.name}") 145 | print(f" Type: {monitor.data.type}") 146 | 147 | 148 | if __name__ == '__main__': 149 | if SKIP_INTEGRATION: 150 | print(f"\n⚠️ {SKIP_REASON}\n") 151 | print("Example:") 152 | print(" export CRONITOR_TEST_API_KEY=your_api_key_here") 153 | print(" python -m unittest cronitor.tests.test_integration -v\n") 154 | else: 155 | print(f"\n🚀 Running integration tests against Cronitor API...\n") 156 | unittest.main() 157 | -------------------------------------------------------------------------------- /cronitor/celery.py: -------------------------------------------------------------------------------- 1 | import typing 2 | import datetime 3 | import humanize 4 | import logging 5 | from cronitor import State, Monitor 6 | import cronitor 7 | import functools 8 | import shutil 9 | import tempfile 10 | import sys 11 | 12 | logger = logging.getLogger(__name__) 13 | try: 14 | import celery 15 | import celery.beat 16 | from celery.schedules import crontab, schedule, solar 17 | 
from celery.signals import beat_init, task_prerun, task_failure, task_success, task_retry 18 | 19 | if typing.TYPE_CHECKING: 20 | from typing import Dict, List, Union, Optional, Tuple 21 | import billiard.einfo 22 | from celery.worker.request import Request 23 | except ImportError: 24 | logger.error("Cannot use the cronitor.celery module without celery installed") 25 | sys.exit(1) 26 | 27 | # For the signals to properly register, they need to be top-level objects. 28 | # Since they are defined dynamically in initialize(), we have to declare them up top, 29 | # make them global, and override them. 30 | celerybeat_startup = None 31 | ping_monitor_before_task = None 32 | ping_monitor_on_success = None 33 | ping_monitor_on_failure = None 34 | ping_monitor_on_retry = None 35 | 36 | 37 | def get_headers_from_task(task): # type: (celery.Task) -> Dict 38 | headers = task.request.headers or {} 39 | request = task.request or {} 40 | headers.update(request.get("properties", {}).get("application_headers", {})) 41 | return headers 42 | 43 | 44 | def initialize(app, celerybeat_only=False, api_key=None): # type: (celery.Celery, bool, Optional[str]) -> None 45 | if api_key: 46 | cronitor.api_key = api_key 47 | 48 | if celerybeat_only: 49 | cronitor.celerybeat_only = True 50 | 51 | global celerybeat_startup 52 | global ping_monitor_before_task 53 | global ping_monitor_on_success 54 | global ping_monitor_on_failure 55 | global ping_monitor_on_retry 56 | 57 | def celerybeat_startup(sender, **kwargs): # type: (celery.beat.Service, Dict) -> None 58 | # To avoid recursion, since restarting celerybeat will result in this 59 | # signal being called again, we disconnect the signal. 60 | beat_init.disconnect(celerybeat_startup, dispatch_uid=1) 61 | 62 | # Must use the cached_property from scheduler so as not to re-open the shelve database 63 | scheduler = sender.scheduler # type: celery.beat.Scheduler 64 | # Also need to use the property here, including for django-celery-beat 65 | schedules = scheduler.schedule 66 | monitors = [] # type: List[Dict[str, str]] 67 | 68 | add_periodic_task_deferred = [] 69 | for name in schedules: 70 | if name.startswith('celery.'): 71 | continue 72 | entry = schedules[name] # type: celery.beat.ScheduleEntry 73 | 74 | # ignore all celerybeat scheduled events with the Cronitor exclusion header 75 | headers = entry.options.pop('headers', {}) 76 | if headers.get('x-cronitor-exclude') in (True, 'true', 'True'): 77 | logger.info("celerybeat entry '{}' ignored per exclusion header".format(name)) 78 | continue 79 | 80 | item = entry.schedule # type: celery.schedules.schedule 81 | if isinstance(item, crontab): 82 | cronitor_schedule = ('{0._orig_minute} {0._orig_hour} {0._orig_day_of_week} {0._orig_day_of_month} ' 83 | '{0._orig_month_of_year}').format(item) 84 | elif isinstance(item, schedule): 85 | freq = item.run_every # type: datetime.timedelta 86 | cronitor_schedule = 'every ' + humanize.precisedelta(freq) 87 | elif isinstance(item, solar): 88 | # We don't support solar schedules 89 | logger.warning("The cronitor-python celery module does not support " 90 | "tasks using solar schedules. 
Task schedule '{}' will " 91 | "not be monitored".format(name)) 92 | continue 93 | else: 94 | logger.warning("The cronitor-python celery module does not support " 95 | "schedules of type `{}`".format(type(item))) 96 | continue 97 | 98 | monitors.append({ 99 | 'type': 'job', 100 | 'key': name, 101 | 'schedule': cronitor_schedule, 102 | }) 103 | 104 | headers.update({ 105 | 'x-cronitor-task-origin': 'celerybeat', 106 | 'x-cronitor-celerybeat-name': name, 107 | }) 108 | 109 | add_periodic_task_deferred.append( 110 | functools.partial(app.add_periodic_task, 111 | entry.schedule, 112 | # Setting headers in the signature 113 | # works better than in periodic task options 114 | app.tasks.get(entry.task).s().set(headers=headers), 115 | args=entry.args, kwargs=entry.kwargs, 116 | name=entry.name, **(entry.options or {})) 117 | ) 118 | 119 | if isinstance(sender.scheduler, celery.beat.PersistentScheduler): 120 | # The celerybeat-schedule file with shelve gets corrupted really easily, so we need 121 | # to set up a tempfile instead. 122 | new_schedule = tempfile.NamedTemporaryFile() 123 | with open(sender.schedule_filename, 'rb') as current_schedule: 124 | shutil.copyfileobj(current_schedule, new_schedule) 125 | # We need to stop and restart celerybeat to get the task updates in place. 126 | # This isn't ideal, but seems to work. 127 | 128 | sender.stop() 129 | # Now, actually add all the periodic tasks to overwrite beat with the headers 130 | for task in add_periodic_task_deferred: 131 | task() 132 | # Then, restart celerybeat, on the new schedule file (copied from the old one) 133 | app.Beat(schedule=new_schedule.name).run() 134 | 135 | else: 136 | # For django-celery, etc., we don't need to stop and restart celerybeat 137 | for task in add_periodic_task_deferred: 138 | task() 139 | 140 | logger.debug("[Cronitor] creating monitors: %s", [m['key'] for m in monitors]) 141 | Monitor.put(monitors) 142 | 143 | beat_init.connect(celerybeat_startup, dispatch_uid=1) 144 | 145 | @task_prerun.connect 146 | def ping_monitor_before_task(sender, **kwargs): # type: (celery.Task, Dict) -> None 147 | headers = get_headers_from_task(sender) 148 | if 'x-cronitor-celerybeat-name' in headers: 149 | monitor = Monitor(headers['x-cronitor-celerybeat-name']) 150 | elif not cronitor.celerybeat_only: 151 | monitor = Monitor(sender.name) 152 | else: 153 | return 154 | 155 | monitor.ping(state=State.RUN, series=sender.request.id) 156 | 157 | @task_success.connect 158 | def ping_monitor_on_success(sender, **kwargs): # type: (celery.Task, Dict) -> None 159 | headers = get_headers_from_task(sender) 160 | if 'x-cronitor-celerybeat-name' in headers: 161 | monitor = Monitor(headers['x-cronitor-celerybeat-name']) 162 | elif not cronitor.celerybeat_only: 163 | monitor = Monitor(sender.name) 164 | else: 165 | return 166 | 167 | monitor.ping(state=State.COMPLETE, series=sender.request.id) 168 | 169 | @task_failure.connect 170 | def ping_monitor_on_failure(sender, # type: celery.Task 171 | task_id, # type: str 172 | exception, # type: Exception 173 | args, # type: Tuple 174 | kwargs, # type: Dict 175 | traceback, 176 | einfo, # type: billiard.einfo.ExceptionInfo 177 | **kwargs2 # type: Dict 178 | ): 179 | headers = get_headers_from_task(sender) 180 | if 'x-cronitor-celerybeat-name' in headers: 181 | monitor = Monitor(headers['x-cronitor-celerybeat-name']) 182 | elif not cronitor.celerybeat_only: 183 | monitor = Monitor(sender.name) 184 | else: 185 | return 186 | 187 | monitor.ping(state=State.FAIL, series=sender.request.id, 
message=str(exception)) 188 | 189 | @task_retry.connect 190 | def ping_monitor_on_retry(sender, # type: celery.Task 191 | request, # type: celery.worker.request.Request 192 | reason, # type: Union[Exception, str] 193 | einfo, # type: billiard.einfo.ExceptionInfo 194 | **kwargs, # type: Dict 195 | ): 196 | headers = get_headers_from_task(sender) 197 | if 'x-cronitor-celerybeat-name' in headers: 198 | monitor = Monitor(headers['x-cronitor-celerybeat-name']) 199 | elif not cronitor.celerybeat_only: 200 | monitor = Monitor(sender.name) 201 | else: 202 | return 203 | 204 | monitor.ping(state=State.FAIL, series=sender.request.id, message=str(reason)) 205 | -------------------------------------------------------------------------------- /cronitor/monitor.py: -------------------------------------------------------------------------------- 1 | import time 2 | import yaml 3 | import logging 4 | import json 5 | import os 6 | import requests 7 | from yaml.loader import SafeLoader 8 | 9 | 10 | import cronitor 11 | from urllib3.util.retry import Retry 12 | from requests.adapters import HTTPAdapter 13 | 14 | logger = logging.getLogger(__name__) 15 | 16 | # https://stackoverflow.com/questions/49121365/implementing-retry-for-requests-in-python 17 | def retry_session(retries, session=None, backoff_factor=0.3): 18 | session = session or requests.Session() 19 | retry = Retry( 20 | total=retries, 21 | read=retries, 22 | connect=retries, 23 | backoff_factor=backoff_factor, 24 | ) 25 | adapter = HTTPAdapter(max_retries=retry) 26 | session.mount('http://', adapter) 27 | session.mount('https://', adapter) 28 | return session 29 | 30 | JSON = 'json' 31 | YAML = 'yaml' 32 | 33 | class Monitor(object): 34 | _headers = { 35 | 'User-Agent': 'cronitor-python', 36 | } 37 | 38 | _req = retry_session(retries=3) 39 | 40 | @classmethod 41 | def as_yaml(cls, api_key=None, api_version=None): 42 | timeout = cronitor.timeout or 10 43 | api_key = api_key or cronitor.api_key 44 | resp = cls._req.get('%s.yaml' % cls._monitor_api_url(), 45 | auth=(api_key, ''), 46 | headers=dict(cls._headers, **{'Content-Type': 'application/yaml', 'Cronitor-Version': api_version}), 47 | timeout=timeout) 48 | if resp.status_code == 200: 49 | return resp.text 50 | else: 51 | raise cronitor.APIError("Unexpected error %s" % resp.text) 52 | 53 | @classmethod 54 | def put(cls, monitors=None, **kwargs): 55 | api_key = cronitor.api_key 56 | api_version = cronitor.api_version 57 | request_format = JSON 58 | 59 | rollback = False 60 | if 'rollback' in kwargs: 61 | rollback = kwargs['rollback'] 62 | del kwargs['rollback'] 63 | if 'api_key' in kwargs: 64 | api_key = kwargs['api_key'] 65 | del kwargs['api_key'] 66 | if 'api_version' in kwargs: 67 | api_version = kwargs['api_version'] 68 | del kwargs['api_version'] 69 | if 'format' in kwargs: 70 | request_format = kwargs['format'] 71 | del kwargs['format'] 72 | 73 | _monitors = monitors or [kwargs] 74 | nested_format = True if type(monitors) == dict else False 75 | 76 | data = cls._put(_monitors, api_key, rollback, request_format, api_version) 77 | 78 | if nested_format: 79 | return data 80 | 81 | _monitors = [] 82 | for md in data: 83 | m = cls(md['key']) 84 | m.data = md 85 | _monitors.append(m) 86 | 87 | return _monitors if len(_monitors) > 1 else _monitors[0] 88 | 89 | @classmethod 90 | def _put(cls, monitors, api_key, rollback, request_format, api_version): 91 | timeout = cronitor.timeout or 10 92 | payload = _prepare_payload(monitors, rollback, request_format) 93 | if request_format == YAML: 94 | 
content_type = 'application/yaml' 95 | data = yaml.dump(payload) 96 | url = '{}.yaml'.format(cls._monitor_api_url()) 97 | else: 98 | content_type = 'application/json' 99 | data = json.dumps(payload) 100 | url = cls._monitor_api_url() 101 | 102 | resp = cls._req.put(url, 103 | auth=(api_key, ''), 104 | data=data, 105 | headers=dict(cls._headers, **{'Content-Type': content_type, 'Cronitor-Version': api_version}), 106 | timeout=timeout) 107 | 108 | if resp.status_code == 200: 109 | if request_format == YAML: 110 | return yaml.load(resp.text, Loader=SafeLoader) 111 | else: 112 | return resp.json().get('monitors', []) 113 | elif resp.status_code == 400: 114 | raise cronitor.APIValidationError(resp.text) 115 | else: 116 | raise cronitor.APIError("Unexpected error %s" % resp.text) 117 | 118 | def __init__(self, key, api_key=None, api_version=None, env=None): 119 | self.key = key 120 | self.api_key = api_key or cronitor.api_key 121 | self.api_verion = api_version or cronitor.api_version 122 | self.env = env or cronitor.environment 123 | self._data = None 124 | 125 | @property 126 | def data(self): 127 | """ 128 | Monitor data with attribute access. Nested dicts are automatically 129 | converted to Structs. 130 | 131 | Example: 132 | >>> monitor = Monitor('my-monitor') 133 | >>> print(monitor.data.name) 134 | >>> print(monitor.data.request.url) 135 | >>> print(monitor.data) # Pretty JSON output 136 | """ 137 | if self._data and type(self._data) is not Struct: 138 | self._data = Struct(**self._data) 139 | elif not self._data: 140 | self._data = Struct(**self._fetch()) 141 | return self._data 142 | 143 | @data.setter 144 | def data(self, data): 145 | self._data = Struct(**data) 146 | 147 | def delete(self): 148 | resp = requests.delete( 149 | self._monitor_api_url(self.key), 150 | auth=(self.api_key, ''), 151 | headers=self._headers, 152 | timeout=10) 153 | 154 | if resp.status_code == 204: 155 | return True 156 | elif resp.status_code == 404: 157 | raise cronitor.MonitorNotFound("Monitor '%s' not found" % self.key) 158 | else: 159 | raise cronitor.APIError("An unexpected error occured when deleting '%s'" % self.key) 160 | 161 | def ping(self, **params): 162 | if not self.api_key: 163 | logger.error('No API key detected. Set cronitor.api_key or initialize Monitor with kwarg api_key.') 164 | return 165 | 166 | return self._req.get(url=self._ping_api_url(), params=self._clean_params(params), timeout=5, headers=self._headers) 167 | 168 | def ok(self): 169 | self.ping(state=cronitor.State.OK) 170 | 171 | def pause(self, hours): 172 | if not self.api_key: 173 | logger.error('No API key detected. Set cronitor.api_key or initialize Monitor with kwarg api_key.') 174 | return 175 | 176 | return self._req.get(url='{}/pause/{}'.format(self._monitor_api_url(self.key), hours), auth=(self.api_key, ''), timeout=5, headers=self._headers) 177 | 178 | def unpause(self): 179 | return self.pause(0) 180 | 181 | def _fetch(self): 182 | if not self.api_key: 183 | raise cronitor.AuthenticationError('No api_key detected. 
Set cronitor.api_key or initialize Monitor with kwarg.') 184 | 185 | resp = requests.get(self._monitor_api_url(self.key), 186 | timeout=10, 187 | auth=(self.api_key, ''), 188 | headers=dict(self._headers, **{'Content-Type': 'application/json', 'Cronitor-Version': self.api_verion})) 189 | 190 | if resp.status_code == 404: 191 | raise cronitor.MonitorNotFound("Monitor '%s' not found" % self.key) 192 | return resp.json() 193 | 194 | def _clean_params(self, params): 195 | metrics = None 196 | if 'metrics' in params and type(params['metrics']) == dict: 197 | metrics = ['{}:{}'.format(k,v) for k,v in params['metrics'].items()] 198 | 199 | return { 200 | 'state': params.get('state', None), 201 | 'message': params.get('message', None), 202 | 'series': params.get('series', None), 203 | 'host': params.get('host', os.getenv('COMPUTERNAME', None)), 204 | 'metric': metrics, 205 | 'stamp': time.time(), 206 | 'env': self.env, 207 | } 208 | 209 | def _ping_api_url(self): 210 | return "https://cronitor.link/p/{}/{}".format(self.api_key, self.key) 211 | 212 | @classmethod 213 | def list(cls, keys=None, page=1, pageSize=100, auto_paginate=False, **filters): 214 | """ 215 | Fetch monitors with optional filtering and pagination. 216 | 217 | Args: 218 | keys: Optional list of monitor keys to fetch specifically 219 | page: Page number (default: 1) 220 | pageSize: Results per page (default: 100) 221 | auto_paginate: If True, automatically fetch all pages (default: False) 222 | **filters: type, group, tag, state, env, search, sort 223 | 224 | Returns: 225 | List of Monitor instances 226 | 227 | Examples: 228 | # Fetch specific monitors 229 | monitors = Monitor.list(['key1', 'key2']) 230 | 231 | # Fetch first page of job monitors 232 | monitors = Monitor.list(type='job') 233 | 234 | # Fetch specific page 235 | monitors = Monitor.list(type='job', page=2, pageSize=50) 236 | 237 | # Fetch all pages automatically 238 | monitors = Monitor.list(type='job', auto_paginate=True) 239 | """ 240 | if keys: 241 | # Fetch specific monitors individually 242 | monitors = [cls(key) for key in keys] 243 | # Populate data immediately 244 | for m in monitors: 245 | _ = m.data # Triggers fetch 246 | return monitors 247 | 248 | # Fetch from API with filters 249 | monitors = [] 250 | current_page = page 251 | 252 | while True: 253 | result = cls._fetch_page(current_page, pageSize, **filters) 254 | monitors.extend(result) 255 | 256 | if not auto_paginate or len(result) < pageSize: 257 | # Either not auto-paginating or no more results 258 | break 259 | 260 | current_page += 1 261 | 262 | return monitors 263 | 264 | @classmethod 265 | def _fetch_page(cls, page, pageSize, **filters): 266 | """Fetch a single page of monitors from the API""" 267 | api_key = filters.pop('api_key', None) or cronitor.api_key 268 | api_version = filters.pop('api_version', None) or cronitor.api_version 269 | timeout = cronitor.timeout or 10 270 | 271 | params = dict(filters, page=page, pageSize=pageSize) 272 | 273 | resp = cls._req.get( 274 | cls._monitor_api_url(), 275 | auth=(api_key, ''), 276 | params=params, 277 | headers=dict(cls._headers, **{'Cronitor-Version': api_version}), 278 | timeout=timeout 279 | ) 280 | 281 | if resp.status_code == 200: 282 | data = resp.json() 283 | monitors = [] 284 | for monitor_data in data.get('monitors', []): 285 | m = cls(monitor_data['key']) 286 | m.data = monitor_data 287 | monitors.append(m) 288 | return monitors 289 | else: 290 | raise cronitor.APIError("Unexpected error %s" % resp.text) 291 | 292 | @classmethod 293 | def 
_monitor_api_url(cls, key=None): 294 | if not key: return "https://cronitor.io/api/monitors" 295 | return "https://cronitor.io/api/monitors/{}".format(key) 296 | 297 | def _prepare_payload(monitors, rollback=False, request_format=JSON): 298 | ret = {} 299 | if request_format == JSON: 300 | ret['monitors'] = monitors 301 | if request_format == YAML: 302 | ret = monitors 303 | if rollback: 304 | ret['rollback'] = True 305 | return ret 306 | 307 | 308 | class Struct(object): 309 | def __init__(self, **kwargs): 310 | for key, value in kwargs.items(): 311 | if isinstance(value, dict): 312 | value = Struct(**value) 313 | elif isinstance(value, list): 314 | value = [Struct(**item) if isinstance(item, dict) else item for item in value] 315 | setattr(self, key, value) 316 | 317 | def __repr__(self): 318 | items = ', '.join(f'{k}={v!r}' for k, v in sorted(self.__dict__.items())) 319 | return f"Struct({items})" 320 | 321 | def __str__(self): 322 | return json.dumps(self._to_dict(), indent=2, sort_keys=True, default=str) 323 | 324 | def _to_dict(self): 325 | result = {} 326 | for key, value in self.__dict__.items(): 327 | if isinstance(value, Struct): 328 | result[key] = value._to_dict() 329 | elif isinstance(value, list): 330 | result[key] = [item._to_dict() if isinstance(item, Struct) else item for item in value] 331 | else: 332 | result[key] = value 333 | return result 334 | -------------------------------------------------------------------------------- /cronitor/tests/test_monitor.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import cronitor 3 | import unittest 4 | from unittest.mock import call, patch, ANY 5 | 6 | import cronitor 7 | 8 | FAKE_API_KEY = 'cb54ac4fd16142469f2d84fc1bbebd84XXXDEADXXX' 9 | 10 | MONITOR = { 11 | 'type': 'job', 12 | 'key': 'a-test_key', 13 | 'schedule': '* * * * *', 14 | 'assertions': [ 15 | 'metric.duration < 10 seconds' 16 | ], 17 | # 'notify': ['devops-alerts'] 18 | } 19 | MONITOR_2 = copy.deepcopy(MONITOR) 20 | MONITOR_2['key'] = 'another-test-key' 21 | 22 | YAML_FORMAT_MONITORS = { 23 | 'jobs': { 24 | MONITOR['key']: MONITOR, 25 | MONITOR_2['key']: MONITOR_2 26 | } 27 | } 28 | 29 | cronitor.api_key = FAKE_API_KEY 30 | 31 | class MonitorTests(unittest.TestCase): 32 | 33 | @patch('cronitor.Monitor._put', return_value=[MONITOR]) 34 | def test_create_monitor(self, mocked_create): 35 | monitor = cronitor.Monitor.put(**MONITOR) 36 | self.assertEqual(monitor.data.key, MONITOR['key']) 37 | self.assertEqual(monitor.data.assertions, MONITOR['assertions']) 38 | self.assertEqual(monitor.data.schedule, MONITOR['schedule']) 39 | 40 | @patch('cronitor.Monitor._put', return_value=[MONITOR, MONITOR_2]) 41 | def test_create_monitors(self, mocked_create): 42 | monitors = cronitor.Monitor.put([MONITOR, MONITOR_2]) 43 | self.assertEqual(len(monitors), 2) 44 | self.assertCountEqual([MONITOR['key'], MONITOR_2['key']], list(map(lambda m: m.data.key, monitors))) 45 | 46 | @patch('cronitor.Monitor._req.put') 47 | def test_create_monitor_fails(self, mocked_put): 48 | mocked_put.return_value.status_code = 400 49 | with self.assertRaises(cronitor.APIValidationError): 50 | cronitor.Monitor.put(**MONITOR) 51 | 52 | @patch('requests.get') 53 | def test_get_monitor_invalid_code(self, mocked_get): 54 | mocked_get.return_value.status_code = 404 55 | with self.assertRaises(cronitor.MonitorNotFound): 56 | monitor = cronitor.Monitor("I don't exist") 57 | monitor.data 58 | 59 | @patch('cronitor.Monitor._put') 60 | def 
test_update_monitor_data(self, mocked_update): 61 | monitor_data = MONITOR.copy() 62 | monitor_data.update({'name': 'Updated Name'}) 63 | mocked_update.return_value = [monitor_data] 64 | 65 | monitor = cronitor.Monitor.put(key=MONITOR['key'], name='Updated Name') 66 | self.assertEqual(monitor.data.name, 'Updated Name') 67 | 68 | @patch('cronitor.Monitor._req.put') 69 | def test_update_monitor_fails_validation(self, mocked_update): 70 | mocked_update.return_value.status_code = 400 71 | with self.assertRaises(cronitor.APIValidationError): 72 | cronitor.Monitor.put(schedule='* * * * *') 73 | 74 | @patch('cronitor.Monitor._put', return_value=YAML_FORMAT_MONITORS) 75 | def test_create_monitors_yaml_body(self, mocked_create): 76 | monitors = cronitor.Monitor.put(monitors=YAML_FORMAT_MONITORS, format='yaml') 77 | self.assertIn(MONITOR['key'], monitors['jobs']) 78 | self.assertIn(MONITOR_2['key'], monitors['jobs']) 79 | 80 | @patch('requests.delete') 81 | def test_delete_no_id(self, mocked_delete): 82 | mocked_delete.return_value.status_code = 204 83 | monitor = cronitor.Monitor(MONITOR['key']) 84 | monitor.delete() 85 | 86 | @patch('cronitor.Monitor._put') 87 | def test_struct_nested_dict_access(self, mocked_put): 88 | """Test that nested dicts are converted to Structs for attribute access""" 89 | monitor_with_nested = { 90 | 'key': 'test-key', 91 | 'name': 'Test Monitor', 92 | 'attributes': { 93 | 'code': 'ABC123', 94 | 'group_name': 'production', 95 | }, 96 | 'latest_event': { 97 | 'stamp': 1234567890.0, 98 | 'event': 'complete', 99 | 'metrics': {'duration': 1.5}, 100 | } 101 | } 102 | mocked_put.return_value = [monitor_with_nested] 103 | 104 | monitor = cronitor.Monitor.put(**monitor_with_nested) 105 | 106 | # Test nested attribute access 107 | self.assertEqual(monitor.data.attributes.code, 'ABC123') 108 | self.assertEqual(monitor.data.attributes.group_name, 'production') 109 | self.assertEqual(monitor.data.latest_event.event, 'complete') 110 | self.assertEqual(monitor.data.latest_event.stamp, 1234567890.0) 111 | 112 | # Test deeply nested dict is also converted to Struct 113 | self.assertEqual(monitor.data.latest_event.metrics.duration, 1.5) 114 | 115 | @patch('cronitor.Monitor._put') 116 | def test_struct_list_with_dicts(self, mocked_put): 117 | """Test that lists containing dicts are converted properly""" 118 | monitor_with_list = { 119 | 'key': 'test-key', 120 | 'name': 'Test Monitor', 121 | 'latest_events': [ 122 | {'stamp': 1234567890.0, 'event': 'run'}, 123 | {'stamp': 1234567900.0, 'event': 'complete'}, 124 | ] 125 | } 126 | mocked_put.return_value = [monitor_with_list] 127 | 128 | monitor = cronitor.Monitor.put(**monitor_with_list) 129 | 130 | # Test list items are converted to Structs 131 | self.assertEqual(len(monitor.data.latest_events), 2) 132 | self.assertEqual(monitor.data.latest_events[0].event, 'run') 133 | self.assertEqual(monitor.data.latest_events[1].event, 'complete') 134 | self.assertEqual(monitor.data.latest_events[1].stamp, 1234567900.0) 135 | 136 | @patch('cronitor.Monitor._put') 137 | def test_struct_str_pretty_print(self, mocked_put): 138 | """Test that Struct.__str__ returns pretty JSON""" 139 | monitor_data = { 140 | 'key': 'test-key', 141 | 'name': 'Test Monitor', 142 | 'type': 'job', 143 | 'passing': True, 144 | } 145 | mocked_put.return_value = [monitor_data] 146 | 147 | monitor = cronitor.Monitor.put(**monitor_data) 148 | 149 | # Test str() returns valid JSON 150 | import json 151 | json_str = str(monitor.data) 152 | parsed = json.loads(json_str) 153 | 154 | 
self.assertEqual(parsed['key'], 'test-key') 155 | self.assertEqual(parsed['name'], 'Test Monitor') 156 | self.assertEqual(parsed['type'], 'job') 157 | self.assertEqual(parsed['passing'], True) 158 | 159 | # Test it's pretty formatted (contains newlines and indentation) 160 | self.assertIn('\n', json_str) 161 | self.assertIn(' ', json_str) 162 | 163 | @patch('cronitor.Monitor._put') 164 | def test_struct_repr(self, mocked_put): 165 | """Test that Struct.__repr__ is useful for debugging""" 166 | monitor_data = { 167 | 'key': 'test-key', 168 | 'name': 'Test Monitor', 169 | } 170 | mocked_put.return_value = [monitor_data] 171 | 172 | monitor = cronitor.Monitor.put(**monitor_data) 173 | 174 | # Test repr starts with Struct and contains key-value pairs 175 | repr_str = repr(monitor.data) 176 | self.assertTrue(repr_str.startswith('Struct(')) 177 | self.assertIn('key=', repr_str) 178 | self.assertIn('name=', repr_str) 179 | self.assertIn('test-key', repr_str) 180 | 181 | @patch('requests.get') 182 | def test_list_with_specific_keys(self, mocked_get): 183 | """Test Monitor.list() with specific keys fetches each individually""" 184 | monitor1_data = {'key': 'key1', 'name': 'Monitor 1', 'type': 'job'} 185 | monitor2_data = {'key': 'key2', 'name': 'Monitor 2', 'type': 'check'} 186 | 187 | # Mock responses for individual monitor fetches 188 | def get_side_effect(url, **kwargs): 189 | mock_resp = unittest.mock.Mock() 190 | mock_resp.status_code = 200 191 | if 'key1' in url: 192 | mock_resp.json.return_value = monitor1_data 193 | elif 'key2' in url: 194 | mock_resp.json.return_value = monitor2_data 195 | return mock_resp 196 | 197 | mocked_get.side_effect = get_side_effect 198 | 199 | monitors = cronitor.Monitor.list(['key1', 'key2']) 200 | 201 | # Should return 2 monitors 202 | self.assertEqual(len(monitors), 2) 203 | self.assertEqual(monitors[0].data.key, 'key1') 204 | self.assertEqual(monitors[1].data.key, 'key2') 205 | 206 | # Should have made 2 GET requests 207 | self.assertEqual(mocked_get.call_count, 2) 208 | 209 | @patch('cronitor.Monitor._req.get') 210 | def test_list_with_filters(self, mocked_get): 211 | """Test Monitor.list() with type and other filters""" 212 | monitor1 = {'key': 'job1', 'name': 'Job 1', 'type': 'job'} 213 | monitor2 = {'key': 'job2', 'name': 'Job 2', 'type': 'job'} 214 | 215 | mock_resp = unittest.mock.Mock() 216 | mock_resp.status_code = 200 217 | mock_resp.json.return_value = {'monitors': [monitor1, monitor2]} 218 | mocked_get.return_value = mock_resp 219 | 220 | monitors = cronitor.Monitor.list(type='job', group='production') 221 | 222 | # Should return 2 monitors 223 | self.assertEqual(len(monitors), 2) 224 | self.assertEqual(monitors[0].data.key, 'job1') 225 | self.assertEqual(monitors[1].data.key, 'job2') 226 | 227 | # Should have made 1 GET request with correct params 228 | self.assertEqual(mocked_get.call_count, 1) 229 | call_kwargs = mocked_get.call_args[1] 230 | self.assertEqual(call_kwargs['params']['type'], 'job') 231 | self.assertEqual(call_kwargs['params']['group'], 'production') 232 | self.assertEqual(call_kwargs['params']['page'], 1) 233 | self.assertEqual(call_kwargs['params']['pageSize'], 100) 234 | 235 | @patch('cronitor.Monitor._req.get') 236 | def test_list_with_pagination(self, mocked_get): 237 | """Test Monitor.list() with specific page and pageSize""" 238 | monitor1 = {'key': 'job1', 'name': 'Job 1'} 239 | 240 | mock_resp = unittest.mock.Mock() 241 | mock_resp.status_code = 200 242 | mock_resp.json.return_value = {'monitors': [monitor1]} 243 | 
mocked_get.return_value = mock_resp 244 | 245 | monitors = cronitor.Monitor.list(page=3, pageSize=25) 246 | 247 | # Should return monitors 248 | self.assertEqual(len(monitors), 1) 249 | 250 | # Should have made 1 GET request with correct pagination params 251 | self.assertEqual(mocked_get.call_count, 1) 252 | call_kwargs = mocked_get.call_args[1] 253 | self.assertEqual(call_kwargs['params']['page'], 3) 254 | self.assertEqual(call_kwargs['params']['pageSize'], 25) 255 | 256 | @patch('cronitor.Monitor._req.get') 257 | def test_list_auto_paginate(self, mocked_get): 258 | """Test Monitor.list() with auto_paginate=True fetches all pages""" 259 | # First page has 2 monitors (equals pageSize, so more pages exist) 260 | page1 = {'monitors': [{'key': 'job1', 'name': 'Job 1'}, {'key': 'job2', 'name': 'Job 2'}]} 261 | # Second page has 1 monitor (less than pageSize, so stop) 262 | page2 = {'monitors': [{'key': 'job3', 'name': 'Job 3'}]} 263 | 264 | mock_resp1 = unittest.mock.Mock() 265 | mock_resp1.status_code = 200 266 | mock_resp1.json.return_value = page1 267 | 268 | mock_resp2 = unittest.mock.Mock() 269 | mock_resp2.status_code = 200 270 | mock_resp2.json.return_value = page2 271 | 272 | mocked_get.side_effect = [mock_resp1, mock_resp2] 273 | 274 | monitors = cronitor.Monitor.list(pageSize=2, auto_paginate=True) 275 | 276 | # Should return all 3 monitors from both pages 277 | self.assertEqual(len(monitors), 3) 278 | self.assertEqual(monitors[0].data.key, 'job1') 279 | self.assertEqual(monitors[1].data.key, 'job2') 280 | self.assertEqual(monitors[2].data.key, 'job3') 281 | 282 | # Should have made 2 GET requests (page 1 and page 2) 283 | self.assertEqual(mocked_get.call_count, 2) 284 | 285 | # Verify pagination params 286 | call1_kwargs = mocked_get.call_args_list[0][1] 287 | call2_kwargs = mocked_get.call_args_list[1][1] 288 | self.assertEqual(call1_kwargs['params']['page'], 1) 289 | self.assertEqual(call2_kwargs['params']['page'], 2) 290 | 291 | @patch('cronitor.Monitor._req.get') 292 | def test_list_empty_results(self, mocked_get): 293 | """Test Monitor.list() with no matching monitors""" 294 | mock_resp = unittest.mock.Mock() 295 | mock_resp.status_code = 200 296 | mock_resp.json.return_value = {'monitors': []} 297 | mocked_get.return_value = mock_resp 298 | 299 | monitors = cronitor.Monitor.list(type='nonexistent') 300 | 301 | # Should return empty list 302 | self.assertEqual(len(monitors), 0) 303 | self.assertEqual(monitors, []) 304 | 305 | @patch('cronitor.Monitor._req.get') 306 | def test_list_api_error(self, mocked_get): 307 | """Test Monitor.list() handles API errors""" 308 | mock_resp = unittest.mock.Mock() 309 | mock_resp.status_code = 500 310 | mock_resp.text = 'Internal Server Error' 311 | mocked_get.return_value = mock_resp 312 | 313 | with self.assertRaises(cronitor.APIError): 314 | cronitor.Monitor.list(type='job') 315 | 316 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Cronitor Python Library 2 | ![Test](https://github.com/cronitorio/cronitor-python/workflows/Test/badge.svg) 3 | 4 | [Cronitor](https://cronitor.io/) provides end-to-end monitoring for background jobs, websites, APIs, and anything else that can send or receive an HTTP request. This library provides convenient access to the Cronitor API from applications written in Python. 
See our [API docs](https://cronitor.io/docs/api) for detailed references on configuring monitors and sending telemetry pings. 5 | 6 | In this guide: 7 | 8 | - [Installation](#Installation) 9 | - [Monitoring Background Jobs](#monitoring-background-jobs) 10 | - [Sending Telemetry Events](#sending-telemetry-events) 11 | - [Configuring Monitors](#configuring-monitors) 12 | - [Package Configuration & Env Vars](#package-configuration) 13 | - [Command Line Usage](#command-line-usage) 14 | 15 | ## Installation 16 | 17 | ``` 18 | pip install cronitor 19 | ``` 20 | 21 | ## Monitoring Background Jobs 22 | 23 | #### Celery Auto-Discover 24 | `cronitor-python` can automatically discover all of your declared Celery tasks, including your Celerybeat scheduled tasks, 25 | creating monitors for them and sending pings when tasks run, succeed, or fail. Your API keys can be found [here](https://cronitor.io/settings/api). 26 | 27 | Requires Celery 4.0 or higher. Celery auto-discover utilizes the Celery [message protocol version 2](https://docs.celeryproject.org/en/stable/internals/protocol.html#version-2). 28 | 29 | **Some important notes on support** 30 | 31 | * Tasks on [solar schedules](https://docs.celeryproject.org/en/stable/userguide/periodic-tasks.html#solar-schedules) are not supported and will be ignored. 32 | * [`django-celery-beat`](https://docs.celeryproject.org/en/stable/userguide/periodic-tasks.html#using-custom-scheduler-classes) is not yet supported, but is in the works. 33 | * If you use the default `PersistentScheduler`, the celerybeat integration overrides the celerybeat local task run database (as referenced [here](https://docs.celeryproject.org/en/stable/userguide/periodic-tasks.html#starting-the-scheduler) in the docs), named `celerybeat-schedule` by default. If you currently specify a custom location for this database, this integration will override it. **Very** few people require setting custom locations for this database. If you fall into this group and want to use `cronitor-python`'s celerybeat integration, please reach out to Cronitor support. 34 | 35 | 36 | ```python 37 | import cronitor.celery 38 | from celery import Celery 39 | 40 | app = Celery() 41 | app.conf.beat_schedule = { 42 | 'run-me-every-minute': { 43 | 'task': 'tasks.every_minute_celery_task', 44 | 'schedule': 60 45 | } 46 | } 47 | 48 | # Discover all of your celery tasks and automatically add monitoring. 49 | cronitor.celery.initialize(app, api_key="apiKey123") 50 | 51 | @app.task 52 | def every_minute_celery_task(): 53 | print("running a background job with celery...") 54 | 55 | @app.task 56 | def non_scheduled_celery_task(): 57 | print("Even though I'm not on a schedule, I'll still be monitored!") 58 | ``` 59 | 60 | If you only want to monitor Celerybeat periodic tasks, and not tasks triggered any other way, you can set `celerybeat_only=True` when initializing: 61 | ```python 62 | app = Celery() 63 | cronitor.celery.initialize(app, api_key="apiKey123", celerybeat_only=True) 64 | ``` 65 | 66 | #### Manual Integration 67 | 68 | The `@cronitor.job` decorator is a lightweight way to monitor any background task regardless of how it is executed. It will send telemetry events before calling your function and after it exits. If your function raises an exception, a `fail` event will be sent (and the exception re-raised). 69 | 70 | ```python 71 | import cronitor 72 | 73 | # your api keys can be found here - https://cronitor.io/settings/api 74 | cronitor.api_key = 'apiKey123' 75 | 76 | # Apply the cronitor decorator to monitor any function. 
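# A telemetry event is sent before the function runs and another when it exits; if the function raises, a fail event is sent and the exception is re-raised.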
77 | # If no monitor matches the provided key, one will be created automatically. 78 | @cronitor.job('send-invoices') 79 | def send_invoices_task(*args, **kwargs): 80 | ... 81 | ``` 82 | 83 | #### You can provide monitor attributes that will be synced when your app starts 84 | 85 | To sync attributes, provide an API key with monitor:write privileges. 86 | 87 | ```python 88 | import cronitor 89 | 90 | # Copy your SDK Integration key from https://cronitor.io/app/settings/api 91 | cronitor.api_key = 'apiKey123' 92 | 93 | @cronitor.job('send-invoices', attributes={'schedule': '0 8 * * *', 'notify': ['devops-alerts']}) 94 | def send_invoices_task(*args, **kwargs): 95 | ... 96 | ``` 97 | 98 | ## Sending Telemetry Events 99 | 100 | If you want to send heartbeat events, or want finer control over when/how [telemetry events](https://cronitor.io/docs/telemetry-api) are sent for your jobs, you can create a monitor instance and call the `.ping` method. 101 | 102 | ```python 103 | import cronitor 104 | 105 | # your api keys can be found here - https://cronitor.io/settings/api 106 | cronitor.api_key = 'apiKey123' 107 | 108 | # optionally, set an environment 109 | cronitor.environment = 'staging' 110 | 111 | monitor = cronitor.Monitor('heartbeat-monitor') 112 | monitor.ping() # send a heartbeat event 113 | 114 | # optional params can be passed as keyword arguments. 115 | # for a complete list see https://cronitor.io/docs/telemetry-api#parameters 116 | monitor.ping( 117 | state='run|complete|fail|ok', # run|complete|fail used to measure lifecycle of a job, ok used for manual reset only. 118 | message='', # message that will be displayed in alerts as well as monitor activity panel on your dashboard. 119 | metrics={ 120 | 'duration': 100, # how long the job ran (complete|fail only). Cronitor will calculate this when not provided 121 | 'count': 4500, # if your job is processing a number of items you can report a count 122 | 'error_count': 10 # the number of errors that occurred while this job was running 123 | } 124 | ) 125 | ``` 126 | 127 | ## Configuring Monitors 128 | 129 | ### YAML Configuration File 130 | 131 | You can configure all of your monitors using a single YAML file. This can be version controlled and synced to Cronitor as part of 132 | a deployment or build process. For details on all of the attributes that can be set, see the [Monitor API](https://cronitor.io/docs/monitor-api) documentation. 133 | 134 | ```python 135 | import cronitor 136 | 137 | # your api keys can be found here - https://cronitor.io/settings/api 138 | cronitor.api_key = 'apiKey123' 139 | 140 | cronitor.read_config('./cronitor.yaml') # parse the yaml file of monitors 141 | 142 | cronitor.validate_config() # send monitors to Cronitor for configuration validation 143 | 144 | cronitor.apply_config() # sync the monitors from the config file to Cronitor 145 | 146 | cronitor.generate_config() # generate a new config file from the Cronitor API 147 | ``` 148 | 149 | The timeout for `validate_config`, `apply_config`, and `generate_config` is 10 seconds by default. It can be overridden by setting the environment variable `CRONITOR_TIMEOUT`, or by assigning a value to `cronitor.timeout`. 150 | 151 | ```python 152 | import cronitor 153 | 154 | cronitor.timeout = 30 155 | cronitor.apply_config() 156 | ``` 157 | 158 | The `cronitor.yaml` file includes three top-level keys: `jobs`, `checks`, and `heartbeats`. You can configure monitors under each key by defining [monitors](https://cronitor.io/docs/monitor-api#attributes). 
159 | 160 | ```yaml 161 | jobs: 162 | nightly-database-backup: 163 | schedule: 0 0 * * * 164 | notify: 165 | - devops-alert-pagerduty 166 | assertions: 167 | - metric.duration < 5 minutes 168 | 169 | send-welcome-email: 170 | schedule: every 10 minutes 171 | assertions: 172 | - metric.count > 0 173 | - metric.duration < 30 seconds 174 | 175 | checks: 176 | cronitor-homepage: 177 | request: 178 | url: https://cronitor.io 179 | regions: 180 | - us-east-1 181 | - eu-central-1 182 | - ap-northeast-1 183 | assertions: 184 | - response.code = 200 185 | - response.time < 2s 186 | 187 | cronitor-ping-api: 188 | request: 189 | url: https://cronitor.link/ping 190 | assertions: 191 | - response.body contains ok 192 | - response.time < .25s 193 | 194 | heartbeats: 195 | production-deploy: 196 | notify: 197 | alerts: ['deploys-slack'] 198 | events: true # send alert when the event occurs 199 | 200 | ``` 201 | #### Async Uploads 202 | If you are working with large YAML files (300+ monitors), you may hit timeouts when trying to sync monitors in a single HTTP request. This workload can be processed asynchronously by adding the key `async: true` to the config file. The request will immediately return a `batch_key`. If a `webhook_url` parameter is included, Cronitor will POST to that URL with the results of the background processing and will include the `batch_key` matching the one returned in the initial response. 203 | 204 | ### Monitor.put 205 | 206 | You can also create and update monitors by calling `Monitor.put`. For details on all of the attributes that can be set, see the Monitor API [documentation](https://cronitor.io/docs/monitor-api#attributes). 207 | 208 | ```python 209 | import cronitor 210 | 211 | monitors = cronitor.Monitor.put([ 212 | { 213 | 'type': 'job', 214 | 'key': 'send-customer-invoices', 215 | 'schedule': '0 0 * * *', 216 | 'assertions': [ 217 | 'metric.duration < 5 min' 218 | ], 219 | 'notify': ['devops-alerts-slack'] 220 | }, 221 | { 222 | 'type': 'check', 223 | 'key': 'Cronitor Homepage', 224 | 'schedule': 'every 45 seconds', 225 | 'request': { 226 | 'url': 'https://cronitor.io' 227 | }, 228 | 'assertions': [ 229 | 'response.code = 200', 230 | 'response.time < 600ms', 231 | ] 232 | } 233 | ]) 234 | ``` 235 | 236 | ### Listing and Inspecting Monitors 237 | 238 | You can fetch multiple monitors using `Monitor.list()` with optional filtering and pagination: 239 | 240 | ```python 241 | import cronitor 242 | 243 | # Fetch specific monitors by key 244 | monitors = cronitor.Monitor.list(['backup-job', 'health-check', 'send-invoices']) 245 | 246 | # Fetch all monitors (first page, 100 results by default) 247 | monitors = cronitor.Monitor.list() 248 | 249 | # Fetch monitors with filters 250 | monitors = cronitor.Monitor.list(type='check', state='failing') 251 | 252 | # Fetch a specific page 253 | monitors = cronitor.Monitor.list(type='job', page=2, pageSize=50) 254 | 255 | # Fetch all pages automatically 256 | monitors = cronitor.Monitor.list(type='job', auto_paginate=True) 257 | 258 | # Search monitors 259 | monitors = cronitor.Monitor.list(search='backup') 260 | ``` 261 | 
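For example, you can combine `Monitor.list()` with the `.data` accessor described below to print a quick summary of your monitors. This is a minimal sketch; it assumes each monitor returned exposes `key` and `name` via `.data`, as in the examples in this README:

```python
import cronitor

cronitor.api_key = 'apiKey123'

# Print the key and name of every job monitor, fetching every page.
for monitor in cronitor.Monitor.list(type='job', auto_paginate=True):
    print(monitor.data.key, monitor.data.name)
```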
262 | After fetching a monitor, access its data using the `.data` property. Nested data is automatically accessible via attributes: 263 | 264 | ```python 265 | import cronitor 266 | 267 | # Fetch an existing monitor 268 | monitor = cronitor.Monitor('send-invoices') 269 | 270 | # Access monitor attributes 271 | print(monitor.data.name) 272 | print(monitor.data.type) 273 | print(monitor.data.schedule) 274 | 275 | # Access nested data 276 | print(monitor.data.attributes.code) 277 | if monitor.data.latest_event: 278 | print(monitor.data.latest_event.stamp) 279 | print(monitor.data.latest_event.event) 280 | 281 | # Pretty print the entire monitor as JSON 282 | print(monitor.data) 283 | ``` 284 | 285 | ### Pausing, Resetting, and Deleting 286 | 287 | ```python 288 | import cronitor 289 | 290 | monitor = cronitor.Monitor('heartbeat-monitor') 291 | 292 | monitor.pause(24) # pause alerting for 24 hours 293 | monitor.unpause() # alias for .pause(0) 294 | monitor.ok() # manually reset to a passing state; alias for monitor.ping(state='ok') 295 | monitor.delete() # destroy the monitor 296 | ``` 297 | 298 | ## Package Configuration 299 | 300 | The package needs to be configured with your account's `API key`, which is available on the [account settings](https://cronitor.io/settings) page. You can also optionally specify an `api_version` and an `environment`. If not provided, your account default is used. These can also be supplied using the environment variables `CRONITOR_API_KEY`, `CRONITOR_API_VERSION`, `CRONITOR_ENVIRONMENT`. 301 | 302 | ```python 303 | import cronitor 304 | 305 | # your api keys can be found here - https://cronitor.io/settings 306 | cronitor.api_key = 'apiKey123' 307 | cronitor.api_version = '2020-10-01' 308 | cronitor.environment = 'cluster_1_prod' 309 | ``` 310 | 311 | ## Command Line Usage 312 | 313 | ```bash 314 | >> python -m cronitor -h 315 | 316 | usage: cronitor [-h] [--apikey APIKEY] [--key KEY] [--msg MSG] 317 | (--run | --complete | --fail | --ok | --pause PAUSE) 318 | 319 | Send status messages to Cronitor ping API. 320 | 321 | optional arguments: 322 | -h, --help show this help message and exit 323 | --authkey AUTHKEY, -a AUTHKEY 324 | Auth Key from Account page 325 | --key KEY, -k KEY Unique key for the monitor to take ping 326 | --msg MSG, -m MSG Optional message to send with ping/fail 327 | --tick, -t Call ping on given monitor 328 | --run, -r Call ping with state=run on given monitor 329 | --complete, -C Call ping with state=complete on given monitor 330 | --fail, -f Call ping with state=fail on given monitor 331 | --pause PAUSE, -P PAUSE 332 | Call pause on given monitor 333 | ``` 334 | 335 | 336 | ## Contributing 337 | 338 | Pull requests and feature requests are happily considered! By participating in this project you agree to abide by the [Code of Conduct](http://contributor-covenant.org/version/2/0). 339 | 340 | ### To contribute 341 | 342 | Fork, then clone the repo: 343 | 344 | git clone git@github.com:your-username/cronitor-python.git 345 | 346 | Set up your machine: 347 | 348 | pip install -r requirements.txt 349 | pip install pytest 350 | 351 | Make sure the tests pass: 352 | 353 | pytest 354 | 355 | **Optional:** Run integration tests against the real Cronitor API: 356 | 357 | export CRONITOR_TEST_API_KEY=your_api_key_here 358 | pytest cronitor/tests/test_integration.py -v 359 | 360 | Make your change. Add tests for your change. 
Make the tests pass: 361 | 362 | pytest 363 | 364 | 365 | Push to your fork and [submit a pull request](https://github.com/cronitorio/cronitor-python/compare/). 366 | --------------------------------------------------------------------------------