├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── app.yaml ├── app ├── __init__.py ├── bpbase.py ├── bps │ ├── __init__.py │ ├── alarm.py │ ├── audit.py │ ├── cluster.py │ ├── command.py │ ├── cont_image.py │ ├── containerize.py │ ├── index.py │ ├── myself.py │ ├── pollings.py │ ├── prune.py │ ├── redis.py │ ├── statistics.py │ ├── task.py │ └── translation.py ├── core.py ├── file_ipc.py ├── render_utils.py └── utils.py ├── config.py ├── daemon.py ├── daemonutils ├── __init__.py ├── auto_balance.py ├── bgtask.py ├── cluster_task.py ├── node_polling.py └── stats_models.py ├── dbver.py ├── dbver ├── README ├── __init__.py ├── manage.py ├── migrate.cfg └── versions │ ├── 00001_init.py │ ├── 00002_proxy_type.py │ ├── 00003_cluster_task.py │ └── __init__.py ├── main.py ├── models ├── __init__.py ├── audit.py ├── base.py ├── cluster.py ├── cluster_plan.py ├── cont_image.py ├── node.py ├── polling_stat.py ├── proxy.py ├── stats_base.py └── task.py ├── override_config.py.example ├── release_task_lock.py ├── requirements.txt ├── static ├── css │ ├── bootstrap.css │ ├── common.css │ ├── components.css │ ├── flat-ui.min.css │ └── flatstrap.css ├── fonts │ ├── digital │ │ ├── ds-digib-webfont.eot │ │ └── ds-digib-webfont.woff │ ├── glyphicons │ │ └── flat-ui-icons-regular.woff │ └── lato │ │ ├── lato-bold.woff │ │ └── lato-regular.woff ├── js │ ├── application.js │ ├── bootstrap.js │ ├── cluster_task.js │ ├── common.js │ ├── components.js │ ├── containerize.js │ ├── flat-ui.min.js │ ├── jquery.localize.min.js │ ├── jquery.min.js │ ├── prune.js │ └── redis_node.js ├── lib │ ├── css │ │ ├── d3 │ │ │ └── nv.d3.css │ │ └── font-awesome.css │ ├── fonts │ │ └── fontawesome-webfont.woff │ └── js │ │ └── d3 │ │ ├── axis.js │ │ ├── d3.v3.js │ │ ├── interactiveLayer.js │ │ ├── legend.js │ │ ├── line.js │ │ ├── lineChart.js │ │ ├── nv.d3.js │ │ ├── scatter.js │ │ ├── tooltip.js │ │ └── utils.js ├── picture │ └── retina.svg └── trans │ ├── -en.json │ ├── -zh.json │ ├── en.js │ └── 
zh.js ├── templates ├── audit │ └── nodes.html ├── base.html ├── blocks │ ├── modal.html │ └── pager.html ├── cluster │ ├── create.html │ ├── inactive.html │ ├── panel.html │ ├── tasks.html │ └── tasks_all.html ├── components │ ├── button.html │ ├── checkbox.html │ ├── cluster │ │ ├── eru_auto_balance.html │ │ ├── export-nodes.html │ │ ├── nodes-add.html │ │ ├── nodes-adjustment.html │ │ ├── row.html │ │ └── tasks.html │ ├── command_console.html │ ├── grid-group.html │ ├── hint.html │ ├── icon.html │ ├── info_console.html │ ├── input.html │ ├── label.html │ ├── node │ │ ├── autodiscover.html │ │ └── row.html │ ├── proxy │ │ └── label.html │ ├── select.html │ └── widgets.html ├── containerize │ ├── deploy │ │ ├── list_base.html │ │ ├── list_proxies.html │ │ ├── list_redis.html │ │ ├── manage.html │ │ ├── no_pod.html │ │ ├── proxy_table.html │ │ ├── redis_table.html │ │ └── table_base.html │ └── image │ │ └── manage_redis.html ├── index.html ├── myself │ └── thirdparty.html ├── pollings.html ├── prune │ ├── audit.html │ ├── pollings.html │ └── tasks.html ├── redis │ ├── create.html │ ├── not_found.html │ └── panel.html └── stats │ ├── base.html │ ├── proxy.html │ └── redis.html ├── test ├── __init__.py ├── alarm.py ├── auto_balance.py ├── base.py ├── cluster.py ├── containerize.py ├── http.py ├── nodes.py ├── task.py └── translation_files.py └── thirdparty ├── __init__.py ├── alarm.py ├── containerize.py ├── openfalcon.py ├── requirements.txt └── statistic.py /.gitignore: -------------------------------------------------------------------------------- 1 | local.* 2 | override_config.py 3 | dump.rdb 4 | 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | 9 | # C extensions 10 | *.so 11 | 12 | # Distribution / packaging 13 | .Python 14 | env/ 15 | bin/ 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | eggs/ 20 | lib64/ 21 | parts/ 22 | sdist/ 23 | var/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | 28 | # Installer logs 29 | pip-log.txt 30 
| pip-delete-this-directory.txt 31 | 32 | # Unit test / coverage reports 33 | htmlcov/ 34 | .tox/ 35 | .coverage 36 | .cache 37 | nosetests.xml 38 | coverage.xml 39 | 40 | # Translations 41 | *.mo 42 | 43 | # Mr Developer 44 | .mr.developer.cfg 45 | .project 46 | .pydevproject 47 | 48 | # Rope 49 | .ropeproject 50 | 51 | # Django stuff: 52 | *.log 53 | *.pot 54 | 55 | # Sphinx documentation 56 | docs/_build/ 57 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 HunanTV 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | 23 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # redis cluster nodes 2 | 3 | define REDIS_CLUSTER_NODE_CONF_A 4 | daemonize yes 5 | port 7100 6 | cluster-node-timeout 5000 7 | pidfile /tmp/redis_cluster_node_a.pid 8 | logfile /tmp/redis_cluster_node_a.log 9 | save "" 10 | appendonly no 11 | cluster-enabled yes 12 | cluster-config-file /tmp/redis_cluster_node_a.conf 13 | endef 14 | 15 | define REDIS_CLUSTER_NODE_CONF_B 16 | daemonize yes 17 | port 7101 18 | cluster-node-timeout 5000 19 | pidfile /tmp/redis_cluster_node_b.pid 20 | logfile /tmp/redis_cluster_node_b.log 21 | save "" 22 | appendonly no 23 | cluster-enabled yes 24 | cluster-config-file /tmp/redis_cluster_node_b.conf 25 | endef 26 | 27 | define REDIS_CLUSTER_NODE_CONF_C 28 | daemonize yes 29 | port 7102 30 | cluster-node-timeout 5000 31 | pidfile /tmp/redis_cluster_node_c.pid 32 | logfile /tmp/redis_cluster_node_c.log 33 | save "" 34 | appendonly no 35 | cluster-enabled yes 36 | cluster-config-file /tmp/redis_cluster_node_c.conf 37 | endef 38 | 39 | ifndef REDIS_SERVER 40 | REDIS_SERVER=redis-server 41 | endif 42 | 43 | export REDIS_CLUSTER_NODE_CONF_A 44 | export REDIS_CLUSTER_NODE_CONF_B 45 | export REDIS_CLUSTER_NODE_CONF_C 46 | 47 | help: 48 | @echo "Use 'make ', where is one of" 49 | @echo " clean remove temporary files" 50 | @echo " test run test suite" 51 | 52 | clean: 53 | -find . 
-type f -name "*.pyc" -exec rm -f "{}" \; 54 | 55 | start-test:clean-test 56 | @echo "Wait several seconds for Redis cluster ready" 57 | @sleep 1 58 | @echo "$$REDIS_CLUSTER_NODE_CONF_A" | $(REDIS_SERVER) - 59 | @echo "$$REDIS_CLUSTER_NODE_CONF_B" | $(REDIS_SERVER) - 60 | @echo "$$REDIS_CLUSTER_NODE_CONF_C" | $(REDIS_SERVER) - 61 | @sleep 5 62 | 63 | clean-test:stop-test 64 | @rm -f /tmp/redis_cluster_node*.conf 65 | @rm -f dump.rdb appendonly.aof 66 | 67 | stop-test: 68 | @test -e /tmp/redis_cluster_node_a.pid && kill `cat /tmp/redis_cluster_node_a.pid` || true 69 | @test -e /tmp/redis_cluster_node_b.pid && kill `cat /tmp/redis_cluster_node_b.pid` || true 70 | @test -e /tmp/redis_cluster_node_c.pid && kill `cat /tmp/redis_cluster_node_c.pid` || true 71 | @rm -f /tmp/redis_cluster_node_*.conf 72 | 73 | test:start-test 74 | @python -m unittest discover -s test/ -p "*.py" 75 | @make stop-test 76 | @echo "=================" 77 | @echo "| Test done \o/ |" 78 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Manage your Redis Clusters in a web GUI 2 | 3 | ![Redis Overview](http://zlo.gs/image_data/8fb3021522baf6414c107cfd3fc0ad406461d5e5) 4 | 5 | Commit a cluster migration from a HTML form 6 | 7 | ![Slot Migrating](http://zlo.gs/image_data/5dc602f756975f97a9264e6e7a94a3ad08518a2f) 8 | 9 | Display cluster nodes in a tidy table 10 | 11 | ![CLUSTER NODES](http://zlo.gs/image_data/4fda367ddacd1501337bc725002dc98384083179) 12 | 13 | RedisCtl is a set of Python toolkit with a web UI based on Flask that makes it easy to manage Redis and clusters. 
14 | 15 | # Overview 16 | 17 | RedisCtl contains 18 | 19 | * a web UI that displays Redis status and receives commands 20 | * a daemon that polls each Redis and collect info and runs tasks like slots migrating for clusters 21 | 22 | And supports thirdparty utilities including 23 | 24 | * Alarm: issue messages when Redis / proxy unreachable or cluster is down 25 | * Statistic: record information like memory / CPU usage, clients count, commands completed, etc; by default [OpenFalcon](https://github.com/open-falcon) is included in the source (require an extra `pip install requests` to work with OpenFalcon module) 26 | * Containerization: deploy Redis / proxy in container 27 | 28 | You could [make your own overridings](https://github.com/HunanTV/redis-ctl/wiki/Customize_App) by implementing several interfaces. ([in Chinese](https://github.com/HunanTV/redis-ctl/wiki/WIP_v0_9_customize_app_zh)). 29 | 30 | # Setup 31 | 32 | First, install Python-dev header files and libs 33 | 34 | # debian / ubuntu 35 | apt-get install python-dev 36 | 37 | # centos 38 | yum install python-devel 39 | 40 | Then clone this project and cd in to install dependencies 41 | 42 | pip install -r requirements.txt 43 | 44 | Run with all configurations default 45 | 46 | python main.py 47 | 48 | To configure the programs, both configuration source file and environment variables (convenient for docker deployment) are applicable 49 | 50 | To use a configure file, copy `override_config.py.example` to `override_config.py`, change anything you want. This file would be imported and override any default config or env vars in `config.py` if available. 51 | 52 | To use env vars, like 53 | 54 | MYSQL_USERNAME=redisctl MYSQL_PASSWORD=p@55w0rd python main.py 55 | 56 | Check `config.py` for configurable items. 57 | 58 | Run the daemon that collects Redis info 59 | 60 | python daemon.py 61 | 62 | Also you could use similar ways to configure daemon, just like setting up the main server. 
63 | 64 | # IPC 65 | 66 | The server and daemon uses `/tmp/details.json` and `/tmp/poll.json` as default IPC files. You could change the directory for those temp files by passing the same `PERMDIR` environ to the web application and the daemon. 67 | -------------------------------------------------------------------------------- /app.yaml: -------------------------------------------------------------------------------- 1 | appname: "redis-ctl" 2 | entrypoints: 3 | web: 4 | cmd: "python main.py" 5 | ports: 6 | - "5000/tcp" 7 | daemon: 8 | cmd: "python daemon.py" 9 | releaselock: 10 | cmd: "python release_task_lock.py" 11 | build: "pip install -r requirements.txt" 12 | -------------------------------------------------------------------------------- /app/__init__.py: -------------------------------------------------------------------------------- 1 | from core import RedisCtl 2 | from bpbase import Blueprint 3 | 4 | __all__ = ['RedisCtl', 'Blueprint'] 5 | -------------------------------------------------------------------------------- /app/bpbase.py: -------------------------------------------------------------------------------- 1 | from functools import wraps 2 | import logging 3 | import flask 4 | 5 | import utils 6 | import models.base 7 | from thirdparty.containerize import ContainerizeExceptionBase 8 | 9 | 10 | class Blueprint(flask.Blueprint): 11 | def __init__(self, *args, **kwargs): 12 | flask.Blueprint.__init__(self, *args, **kwargs) 13 | self.app = None 14 | 15 | def register(self, app, *args, **kwargs): 16 | self.app = app 17 | flask.Blueprint.register(self, app, *args, **kwargs) 18 | 19 | def route_post(self, url_pattern): 20 | return self.route(url_pattern, methods=['POST']) 21 | 22 | def route_post_json(self, url_pattern, update_pollings=False): 23 | def wrapper(f): 24 | @self.route_post(url_pattern) 25 | @wraps(f) 26 | def g(*args, **kwargs): 27 | try: 28 | r, code = f(*args, **kwargs), 200 29 | models.base.db.session.commit() 30 | if update_pollings: 
31 | self.app.write_polling_targets() 32 | except KeyError, e: 33 | r, code = { 34 | 'reason': 'missing argument', 35 | 'missing': e.message, 36 | }, 400 37 | except UnicodeEncodeError, e: 38 | r, code = {'reason': 'invalid input encoding'}, 400 39 | except ValueError, e: 40 | r, code = {'reason': e.message}, 400 41 | except ContainerizeExceptionBase, e: 42 | logging.exception(e) 43 | r, code = { 44 | 'reason': 'containerize fail', 45 | 'detail': e.message, 46 | }, 400 47 | except StandardError, e: 48 | logging.error('UNEXPECTED ERROR') 49 | logging.exception(e) 50 | r, code = {'reason': 'unexpected', 'msg': e.message}, 500 51 | if r is None: 52 | return '', code 53 | return utils.json_response(r, code) 54 | return g 55 | return wrapper 56 | -------------------------------------------------------------------------------- /app/bps/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/projecteru/redis-ctl/16ae59b6dfe3d62ecb59951bd81395c370b005ef/app/bps/__init__.py -------------------------------------------------------------------------------- /app/bps/alarm.py: -------------------------------------------------------------------------------- 1 | from flask import abort, request 2 | 3 | from app.bpbase import Blueprint 4 | from models.base import db 5 | import models.node 6 | import models.proxy 7 | 8 | bp = Blueprint('alarm', __name__, url_prefix='/set_alarm') 9 | 10 | 11 | @bp.before_request 12 | def access_control(): 13 | if not bp.app.access_ctl_user_valid(): 14 | abort(403) 15 | 16 | 17 | def _set_alarm_status(n): 18 | if n is None: 19 | raise ValueError('no such node') 20 | n.suppress_alert = int(request.form['suppress']) 21 | db.session.add(n) 22 | 23 | 24 | @bp.route_post_json('/redis', True) 25 | def set_redis_alarm(): 26 | _set_alarm_status(models.node.get_by_host_port( 27 | request.form['host'], int(request.form['port']))) 28 | 29 | 30 | @bp.route_post_json('/proxy', True) 31 | def 
set_proxy_alarm(): 32 | _set_alarm_status(models.proxy.get_by_host_port( 33 | request.form['host'], int(request.form['port']))) 34 | -------------------------------------------------------------------------------- /app/bps/audit.py: -------------------------------------------------------------------------------- 1 | from flask import render_template, g 2 | 3 | from app.bpbase import Blueprint 4 | import models.audit 5 | 6 | bp = Blueprint('audit', __name__, url_prefix='/audit') 7 | 8 | 9 | @bp.route('/nodes') 10 | def node_events(): 11 | return render_template( 12 | 'audit/nodes.html', page=g.page, 13 | events=models.audit.list_events(g.page * 50, 50)) 14 | -------------------------------------------------------------------------------- /app/bps/command.py: -------------------------------------------------------------------------------- 1 | import json 2 | from flask import request, abort 3 | from hiredis import ReplyError 4 | from redistrib.connection import Connection 5 | from redistrib.command import list_masters 6 | 7 | from app.utils import json_response 8 | from app.bpbase import Blueprint 9 | import models.audit 10 | 11 | bp = Blueprint('command', __name__, url_prefix='/cmd') 12 | 13 | 14 | @bp.before_request 15 | def access_control(): 16 | if not bp.app.access_ctl_user_valid(): 17 | abort(403) 18 | 19 | 20 | def _simple_cmd(host, port, *command): 21 | status = 200 22 | try: 23 | with Connection(host, port) as t: 24 | try: 25 | r = t.talk(*command) 26 | except ReplyError as e: 27 | r = {'reason': e.message} 28 | status = 400 29 | except IOError: 30 | status = 400 31 | r = {'reason': 'not reachable'} 32 | return json_response(r, status) 33 | 34 | 35 | @bp.route('/info') 36 | def exec_info(): 37 | return _simple_cmd(request.args['host'], int(request.args['port']), 'info') 38 | 39 | 40 | @bp.route('/cluster_nodes') 41 | def exec_cluster_nodes(): 42 | return _simple_cmd(request.args['host'], int(request.args['port']), 43 | 'cluster', 'nodes') 44 | 45 | 46 | def 
masters_detail(host, port): 47 | node_details = bp.app.polling_result()['nodes'] 48 | result = [] 49 | masters, myself = list_masters(host, port) 50 | for n in masters: 51 | r = {'host': n.host, 'port': n.port} 52 | try: 53 | r['slots_count'] = len(node_details[ 54 | '%s:%d' % (n.host, n.port)]['slots']) 55 | except KeyError: 56 | pass 57 | result.append(r) 58 | return result, myself 59 | 60 | 61 | @bp.route('/get_masters') 62 | def get_masters_info(): 63 | try: 64 | masters, myself = masters_detail( 65 | request.args['host'], int(request.args['port'])) 66 | return json_response({ 67 | 'masters': masters, 68 | 'myself': { 69 | 'role': myself.role_in_cluster, 70 | 'slots': len(myself.assigned_slots), 71 | }, 72 | }) 73 | except IOError: 74 | return json_response({ 75 | 'masters': [], 76 | 'myself': {'role': 'master', 'slots': 0}, 77 | }) 78 | 79 | 80 | @bp.route_post_json('/exec') 81 | def exec_command(): 82 | host = request.form['host'] 83 | port = int(request.form['port']) 84 | args = json.loads(request.form['cmd']) 85 | models.audit.raw_event( 86 | host, port, models.audit.EVENT_TYPE_EXEC, bp.app.get_user_id(), args) 87 | try: 88 | with Connection(host, port) as t: 89 | try: 90 | r = t.talk(*args) 91 | except ValueError as e: 92 | r = None if e.message == 'No reply' else ( 93 | '-ERROR: ' + e.message) 94 | except ReplyError as e: 95 | r = '-' + e.message 96 | except Exception as e: 97 | r = '!ERROR: ' + (e.message or ('%s' % e)) 98 | return r 99 | 100 | 101 | MAXMEM_LIMIT_LOW = 64 * 1000 * 1000 102 | 103 | @bp.route_post_json('/set_max_mem') 104 | def set_max_mem(): 105 | max_mem = int(request.form['max_mem']) 106 | if not MAXMEM_LIMIT_LOW <= max_mem <= bp.app.config_node_max_mem: 107 | raise ValueError('invalid max_mem size') 108 | host = request.form['host'] 109 | port = int(request.form['port']) 110 | 111 | models.audit.raw_event( 112 | host, port, models.audit.EVENT_TYPE_CONFIG, bp.app.get_user_id(), 113 | {'max_mem': max_mem}) 114 | 115 | with 
Connection(host, port) as t: 116 | m = t.talk('config', 'set', 'maxmemory', str(max_mem)) 117 | if 'ok' != m.lower(): 118 | raise ValueError('CONFIG SET MAXMEMROY redis %s:%d returns %s' % ( 119 | host, port, m)) 120 | 121 | 122 | @bp.route('/get_max_mem') 123 | def get_max_mem(): 124 | return _simple_cmd(request.args['host'], int(request.args['port']), 125 | 'config', 'get', 'maxmemory') 126 | 127 | 128 | @bp.route_post_json('/set_aof') 129 | def set_aof(): 130 | aof = 'yes' if request.form['aof'] == 'y' else 'no' 131 | host = request.form['host'] 132 | port = int(request.form['port']) 133 | 134 | models.audit.raw_event( 135 | host, port, models.audit.EVENT_TYPE_CONFIG, bp.app.get_user_id(), 136 | {'aof': aof}) 137 | 138 | with Connection(host, port) as t: 139 | m = t.talk('config', 'set', 'appendonly', aof) 140 | if 'ok' != m.lower(): 141 | raise ValueError('CONFIG SET APPENDONLY redis %s:%d returns %s' % ( 142 | host, port, m)) 143 | -------------------------------------------------------------------------------- /app/bps/cont_image.py: -------------------------------------------------------------------------------- 1 | from sqlalchemy.exc import IntegrityError 2 | from flask import request, render_template, g, abort 3 | 4 | from app.bpbase import Blueprint 5 | from app.utils import json_response, timestamp_to_datetime 6 | import models.base 7 | import models.cont_image 8 | 9 | bp = Blueprint('cont_image', __name__, url_prefix='/containerize/image') 10 | 11 | 12 | @bp.before_request 13 | def access_control(): 14 | if not bp.app.access_ctl_user_adv(): 15 | abort(403) 16 | 17 | 18 | @bp.route('/manage/redis/') 19 | def manage_redis_images(): 20 | return render_template('containerize/image/manage_redis.html', 21 | images=models.cont_image.list_redis()) 22 | 23 | 24 | @bp.route('/list/redis/') 25 | def list_active_redis_images(): 26 | return json_response([{ 27 | 'id': i.id, 28 | 'name': i.name, 29 | 'creation': i.creation, 30 | 'description': i.description, 31 | } 
for i in models.cont_image.list_redis()]) 32 | 33 | 34 | @bp.route('/remote/redis/') 35 | def list_remote_redis_images(): 36 | r = bp.app.container_client.list_redis_images(g.start, g.limit) 37 | return json_response(r) 38 | 39 | 40 | @bp.route_post_json('/update/redis') 41 | def update_redis_image(): 42 | image = models.cont_image.ContainerImage.query.get(int(request.form['id'])) 43 | image.description = request.form['description'] 44 | models.base.db.session.add(image) 45 | 46 | 47 | @bp.route_post_json('/add/redis') 48 | def add_redis_image(): 49 | try: 50 | r = models.cont_image.add_redis_image( 51 | request.form['name'], request.form['description'], 52 | timestamp_to_datetime(int(request.form['creation']))) 53 | return r.id 54 | except IntegrityError: 55 | models.base.db.session.rollback() 56 | return '' 57 | 58 | 59 | @bp.route_post_json('/del/redis') 60 | def del_redis_image(): 61 | models.cont_image.del_redis_image(int(request.form['id'])) 62 | -------------------------------------------------------------------------------- /app/bps/index.py: -------------------------------------------------------------------------------- 1 | from flask import render_template 2 | 3 | from app.bpbase import Blueprint 4 | import models.node as nm 5 | import models.cluster as cl 6 | 7 | bp = Blueprint('index', __name__) 8 | 9 | 10 | @bp.route('/') 11 | def index(): 12 | nodes = nm.list_all_nodes() 13 | clusters = cl.list_all() 14 | 15 | poll_result = bp.app.polling_result() 16 | node_details = poll_result['nodes'] 17 | proxy_details = poll_result['proxies'] 18 | 19 | proxies = [] 20 | for c in clusters: 21 | for p in c.proxies: 22 | p.detail = proxy_details.get('%s:%d' % (p.host, p.port), {}) 23 | p.stat = p.detail.get('stat', True) 24 | proxies.extend(c.proxies) 25 | 26 | for n in nodes: 27 | detail = node_details.get('%s:%d' % (n.host, n.port), {}) 28 | n.node_id = detail.get('node_id') 29 | n.detail = detail 30 | n.stat = detail.get('stat', True) 31 | return 
render_template('index.html', nodes=nodes, clusters=clusters, 32 | stats_enabled=bp.app.stats_enabled()) 33 | -------------------------------------------------------------------------------- /app/bps/myself.py: -------------------------------------------------------------------------------- 1 | from flask import render_template 2 | 3 | from app.bpbase import Blueprint 4 | 5 | bp = Blueprint('myself', __name__, url_prefix='/myself') 6 | 7 | 8 | @bp.route('/3rd') 9 | def thirdparty(): 10 | return render_template('myself/thirdparty.html', app=bp.app) 11 | -------------------------------------------------------------------------------- /app/bps/pollings.py: -------------------------------------------------------------------------------- 1 | from flask import render_template 2 | 3 | from app.bpbase import Blueprint 4 | from models.polling_stat import PollingStat 5 | 6 | bp = Blueprint('pollings', __name__) 7 | 8 | 9 | @bp.route('/stats/pollings') 10 | def pollings(): 11 | return render_template( 12 | 'pollings.html', pollings=PollingStat.query.order_by( 13 | PollingStat.id.desc()).limit(120)) 14 | -------------------------------------------------------------------------------- /app/bps/prune.py: -------------------------------------------------------------------------------- 1 | from flask import request, render_template, abort 2 | from datetime import datetime, timedelta 3 | 4 | from app.bpbase import Blueprint 5 | from models.audit import NodeEvent 6 | from models.polling_stat import PollingStat 7 | from models.task import ClusterTask, TaskStep 8 | 9 | bp = Blueprint('prune', __name__, url_prefix='/prune') 10 | 11 | 12 | @bp.before_request 13 | def access_control(): 14 | if not bp.app.access_ctl_user_adv(): 15 | abort(403) 16 | 17 | 18 | def xdays_before(x): 19 | return datetime.now() - timedelta(days=x) 20 | 21 | 22 | def objects_before(table, column, dt, limit): 23 | return table.query.filter(getattr(table, column) < dt).order_by( 24 | 
table.id.desc()).limit(limit).all() 25 | 26 | 27 | def delete_before(table, column, id, dt): 28 | obj = table.query.get(id) 29 | if getattr(obj, column) < dt: 30 | table.query.filter(table.id <= obj.id).delete() 31 | 32 | 33 | @bp.route('/list_audit') 34 | def audit(): 35 | dt = xdays_before(30) 36 | ev = objects_before(NodeEvent, 'creation', dt, 300) 37 | first = ev[0] if len(ev) > 0 else None 38 | return render_template('prune/audit.html', datetime=dt, events=ev, 39 | first=first) 40 | 41 | 42 | @bp.route_post_json('/do_audit') 43 | def do_audit(): 44 | delete_before(NodeEvent, 'creation', int(request.form['id']), 45 | xdays_before(30)) 46 | 47 | 48 | @bp.route('/list_pollings') 49 | def pollings(): 50 | dt = xdays_before(7) 51 | pl = objects_before(PollingStat, 'polling_time', dt, 300) 52 | first = pl[0] if len(pl) > 0 else None 53 | return render_template('prune/pollings.html', datetime=dt, pollings=pl, 54 | first=first) 55 | 56 | 57 | @bp.route_post_json('/do_pollings') 58 | def do_pollings(): 59 | delete_before(PollingStat, 'polling_time', int(request.form['id']), 60 | xdays_before(7)) 61 | 62 | 63 | @bp.route('/list_tasks') 64 | def tasks(): 65 | dt = xdays_before(90) 66 | tasks = objects_before(ClusterTask, 'completion', dt, 300) 67 | first = tasks[0] if len(tasks) > 0 else None 68 | return render_template('prune/tasks.html', datetime=dt, tasks=tasks, 69 | first=first) 70 | 71 | 72 | @bp.route_post_json('/do_tasks') 73 | def do_tasks(): 74 | t = ClusterTask.query.get(int(request.form['id'])) 75 | if t.completion < xdays_before(90): 76 | TaskStep.query.filter(TaskStep.task_id <= t.id).delete() 77 | ClusterTask.query.filter(ClusterTask.id <= t.id).delete() 78 | -------------------------------------------------------------------------------- /app/bps/redis.py: -------------------------------------------------------------------------------- 1 | from flask import render_template, abort, request 2 | 3 | from app.utils import json_response, parse_config 4 | from 
app.bpbase import Blueprint 5 | import models.node 6 | import models.audit 7 | from redistrib.connection import Connection 8 | from hiredis.hiredis import ReplyError 9 | 10 | bp = Blueprint('redis', __name__, url_prefix='/redis') 11 | 12 | 13 | @bp.before_request 14 | def access_control(): 15 | if not bp.app.access_ctl_user_valid(): 16 | abort(403) 17 | 18 | 19 | @bp.route('/panel//') 20 | def node_panel(host, port): 21 | node = models.node.get_by_host_port(host, port) 22 | if node is None: 23 | return render_template('redis/not_found.html', 24 | host=host, port=port), 404 25 | return render_template( 26 | 'redis/panel.html', node=node, 27 | max_mem_limit=bp.app.config_node_max_mem) 28 | 29 | 30 | @bp.route('/register') 31 | def register_redis(): 32 | return render_template('redis/create.html') 33 | 34 | 35 | @bp.route('/list_free') 36 | def list_free(): 37 | return json_response([{ 38 | 'host': n.host, 39 | 'port': n.port, 40 | } for n in models.node.list_free()]) 41 | 42 | 43 | @bp.route_post_json('/add', True) 44 | def add_redis(): 45 | host = request.form['host'] 46 | port = int(request.form['port']) 47 | # do some necessary validation 48 | status = 0 49 | reason = 'success' 50 | try: 51 | with Connection(host, port) as t: 52 | try: 53 | info = t.talk("info") 54 | info_dict = parse_config(info) 55 | if info_dict['cluster_enabled'] == '0': 56 | status = 3 57 | reason = 'not in cluster mode' 58 | except ReplyError as e: 59 | reason = e.message 60 | status = 2 61 | except IOError, e: 62 | status = 1 63 | reason = e.message 64 | if status == 0: 65 | models.node.create_instance(host, port) 66 | models.audit.raw_event(host, port, models.audit.EVENT_TYPE_CREATE, 67 | bp.app.get_user_id()) 68 | return {'reason': reason, 'status': status} 69 | 70 | 71 | @bp.route_post_json('/del', True) 72 | def del_redis(): 73 | host = request.form['host'] 74 | port = int(request.form['port']) 75 | models.node.delete_free_instance(host, port) 76 | models.audit.raw_event(host, port, 
models.audit.EVENT_TYPE_DELETE, 77 | bp.app.get_user_id()) 78 | -------------------------------------------------------------------------------- /app/bps/statistics.py: -------------------------------------------------------------------------------- 1 | import re 2 | import time 3 | from flask import render_template, request 4 | 5 | from app.bpbase import Blueprint 6 | from app.utils import json_response 7 | 8 | bp = Blueprint('stats', __name__, url_prefix='/stats') 9 | 10 | PAT_HOST = re.compile('^[-.a-zA-Z0-9]+$') 11 | 12 | REDIS_MAX_FIELDS = [ 13 | 'used_cpu_sys', 'used_cpu_user', 'connected_clients', 14 | 'total_commands_processed', 'evicted_keys', 'expired_keys', 15 | 'keyspace_misses', 'keyspace_hits', 'keys', 16 | ] 17 | REDIS_AVG_FIELDS = ['used_memory', 'used_memory_rss', 'response_time'] 18 | REDIS_FIELDS = {} 19 | for f in REDIS_MAX_FIELDS: 20 | REDIS_FIELDS[f] = 'MAX' 21 | for f in REDIS_AVG_FIELDS: 22 | REDIS_FIELDS[f] = 'AVERAGE' 23 | 24 | PROXY_MAX_FIELDS = ['connected_clients', 'mem_buffer_alloc', 25 | 'completed_commands', 'used_cpu_sys', 'used_cpu_user'] 26 | PROXY_AVG_FIELDS = ['command_elapse', 'remote_cost'] 27 | PROXY_FIELDS = {} 28 | for f in PROXY_MAX_FIELDS: 29 | PROXY_FIELDS[f] = 'MAX' 30 | for f in PROXY_AVG_FIELDS: 31 | PROXY_FIELDS[f] = 'AVERAGE' 32 | 33 | 34 | @bp.route('/redis') 35 | def redis(): 36 | return render_template('stats/redis.html', host=request.args['host'], 37 | port=int(request.args['port'])) 38 | 39 | 40 | @bp.route('/proxy') 41 | def proxy(): 42 | return render_template('stats/proxy.html', host=request.args['host'], 43 | port=int(request.args['port'])) 44 | 45 | def _parse_args(args): 46 | host = args['host'] 47 | if not PAT_HOST.match(host): 48 | raise ValueError('Invalid hostname') 49 | port = int(args['port']) 50 | limit = min(int(args.get('limit', 100)), 500) 51 | interval = max(int(args.get('interval', 8)), 8) 52 | return host, port, limit, interval, limit * interval * 60 53 | 54 | 55 | @bp.route('/fetchredis') 56 
| def fetch_redis(): 57 | host, port, limit, interval, span = _parse_args(request.args) 58 | now = int(time.time()) 59 | return json_response(bp.app.stats_query( 60 | '%s:%d' % (host, port), REDIS_FIELDS, span, now, interval)) 61 | 62 | 63 | @bp.route('/fetchproxy') 64 | def fetch_proxy(): 65 | host, port, limit, interval, span = _parse_args(request.args) 66 | now = int(time.time()) 67 | return json_response(bp.app.stats_query( 68 | '%s:%d' % (host, port), PROXY_FIELDS, span, now, interval)) 69 | -------------------------------------------------------------------------------- /app/bps/translation.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | from flask import safe_join, send_file 3 | 4 | from app.bpbase import Blueprint 5 | from app.utils import json_response 6 | 7 | bp = Blueprint('translation', __name__) 8 | 9 | 10 | @bp.route('/trans/') 11 | def translation(path): 12 | p = safe_join('static/trans', path) 13 | if os.path.exists(p): 14 | return send_file(p, mimetype='text/javascript', conditional=True) 15 | return json_response({}) 16 | -------------------------------------------------------------------------------- /app/file_ipc.py: -------------------------------------------------------------------------------- 1 | import os 2 | import config 3 | import logging 4 | import json 5 | 6 | import models.node as nm 7 | import models.proxy as pr 8 | 9 | INSTANCE_FILE = os.path.join(config.PERMDIR, 'details.json') 10 | INSTANCE_INTERMEDIA_FILE = os.path.join(config.PERMDIR, 'details.tmp.json') 11 | 12 | 13 | def write_details(nodes, proxies): 14 | with open(INSTANCE_INTERMEDIA_FILE, 'w') as f: 15 | f.write(json.dumps({'nodes': nodes, 'proxies': proxies})) 16 | os.rename(INSTANCE_INTERMEDIA_FILE, INSTANCE_FILE) 17 | 18 | 19 | def read_details(): 20 | try: 21 | with open(INSTANCE_FILE, 'r') as f: 22 | return json.loads(f.read()) 23 | except IOError, e: 24 | logging.exception(e) 25 | return {'nodes': {}, 
def write_poll(nodes, proxies):
    """Atomically publish the polling target lists as JSON.

    Write to an intermediate file then rename over the destination so
    readers never see a half-written document.
    """
    with open(POLL_INTERMEDIA_FILE, 'w') as f:
        f.write(json.dumps({
            'nodes': nodes,
            'proxies': proxies,
        }))
    os.rename(POLL_INTERMEDIA_FILE, POLL_FILE)


def read_poll():
    """Read the polling targets; return empty lists if the file is missing.

    Uses ``except ... as`` (Python 2.6+/3 compatible) instead of the
    Python-2-only ``except IOError, e`` form.
    """
    try:
        with open(POLL_FILE, 'r') as f:
            return json.loads(f.read())
    except IOError as e:
        # Best effort: a missing/unreadable file just means "nothing yet".
        logging.exception(e)
        return {'nodes': [], 'proxies': []}


def _poll_entry(instance):
    # Minimal per-instance polling descriptor; shared by nodes and proxies.
    return {
        'host': instance.host,
        'port': instance.port,
        'suppress_alert': instance.suppress_alert,
    }


def write_nodes(nodes, proxies):
    """Dump polling descriptors for the given node and proxy objects."""
    write_poll([_poll_entry(n) for n in nodes],
               [_poll_entry(p) for p in proxies])


def write_nodes_proxies_from_db():
    """Refresh the poll file from the full node/proxy tables."""
    write_nodes(nm.list_all_nodes(), pr.list_all())
def g_input(size=2, offset=0, id=None, cls=None, value=None, placeholder=None,
            addon=None, readonly=False, data=None, lcl=None):
    """Render an input component; ``lcl`` defaults to ``'input-<id>'``."""
    if lcl is None and id is not None:
        lcl = 'input-' + id
    # Fix: lcl was computed but never passed to the template, unlike the
    # sibling g_label/g_hint/g_button/g_checkbox helpers; pass it through
    # so localization ids work consistently.
    return component('input', size=size, offset=offset, id=id, cls=cls or [],
                     value=value or '', placeholder=placeholder or '',
                     addon=addon, readonly=readonly, data=data or {}, lcl=lcl)


def g_select(size=1, offset=0, id=None, cls=None, value=None,
             addon=None, readonly=False, options=None, lcl=None):
    """Render a select component; ``lcl`` defaults to ``'select-<id>'``."""
    if lcl is None and id is not None:
        lcl = 'select-' + id
    # Same fix as g_input: forward the computed lcl to the template.
    return component('select', size=size, offset=offset, id=id, cls=cls or [],
                     value=value or '', addon=addon, readonly=readonly,
                     options=options or [], lcl=lcl)
def parse_config(config):
    """Parse colon-separated ``key:value`` text into a dict.

    Blank lines, ``#`` comments and lines whose first colon is at
    position 0 (or absent) are skipped.  Only the first colon splits,
    so values may themselves contain colons.  Values are not stripped.
    """
    parsed = {}
    for raw in config.split('\n'):
        line = raw.strip(" \t\n\r")
        if not line or line.startswith('#'):
            continue
        if line.find(':') <= 0:
            continue
        key, _, value = line.partition(':')
        parsed[key] = value
    return parsed
def run(interval, app):
    """Start the background daemons and block the main thread on them.

    interval: polling period in seconds, shared by both daemons.
    app: the application object (config.App instance) the daemons use.
    """
    daemons = [
        TaskPoller(app, interval),
        NodeStatCollector(app, interval),
    ]
    for d in daemons:
        d.start()
    # Keep the process alive while the daemon threads run.
    for d in daemons:
        d.join()


def main():
    # Build the app configured by APP_CLASS and run the polling daemons.
    app = config.App(config)
    run(config.POLL_INTERVAL, app)

if __name__ == '__main__':
    main()
def _deploy_node(pod, aof, host, app):
    """Deploy one redis container and register it in the database.

    Returns (container_id, address).  The instance is recorded on the
    default redis port 6379; `address` is the value the container client
    reports (presumably the container's reachable host — confirm against
    the container client's contract).
    """
    depl = app.container_client.deploy_redis(pod, aof, 'macvlan', host=host)
    cid = depl['container_id']
    h = depl['address']
    models.node.create_eru_instance(h, 6379, cid)
    return cid, h


def _rm_containers(cids, app):
    """Remove containers, then best-effort delete their DB records.

    A ValueError from delete_eru_instance is logged and swallowed so one
    missing record does not stop cleanup of the remaining containers.
    """
    app.container_client.rm_containers(cids)
    for c in cids:
        try:
            models.node.delete_eru_instance(c)
        except ValueError as e:
            logging.exception(e)
def _add_slaves(slaves, task, cluster_id, master_host, pod, aof, app):
    """Deploy one slave container per spec and queue 'replicate' steps.

    slaves: iterable of dicts; each may carry a 'host' key selecting the
        deployment host (None lets the scheduler choose).
    Returns (container_ids, hosts) for everything deployed.

    On any failure, every container created so far is removed and the DB
    session is rolled back before re-raising, so task steps and deployed
    containers stay consistent.
    """
    cids = []
    hosts = []
    try:
        for s in slaves:
            logging.info('Auto deploy slave for master %s [task %d],'
                         ' use host %s', master_host, task.id, s.get('host'))
            cid, new_host = _deploy_node(pod, aof, s.get('host'), app)
            cids.append(cid)
            hosts.append(new_host)
            # Slaves always replicate the new master on the default port.
            task.add_step('replicate', cluster_id=cluster_id,
                          master_host=master_host, master_port=6379,
                          slave_host=new_host, slave_port=6379)
        return cids, hosts
    except BaseException as exc:
        # BaseException: cleanup must run even on KeyboardInterrupt et al.
        logging.info('Remove container %s and rollback', cids)
        _rm_containers(cids, app)
        db.session.rollback()
        raise
def _launch(command, host_port_list):
    """Task step: create a new cluster from the given {host, port} dicts.

    Returns True: launching completes in a single run (see the module
    note that step functions return False only when they need another
    pass).
    """
    redistrib.command.create({(a['host'], a['port']) for a in host_port_list},
                             max_slots=256)
    return True


def _fix_migrating(_, host, port):
    """Task step: repair stuck slot-migration state on one node."""
    redistrib.command.fix_migrating(host, port)
    return True


def _join(_, cluster_id, cluster_host, cluster_port, newin_host, newin_port):
    """Task step: add a node to a cluster and record its assignment.

    If the node is unknown to the database the cluster join alone is
    considered success.
    """
    redistrib.command.add_node(cluster_host, cluster_port, newin_host,
                               newin_port)
    n = get_node_by_host_port(newin_host, newin_port)
    if n is None:
        return True
    n.assignee_id = cluster_id
    db.session.add(n)
    commit_session()
    return True
def _quit(_, cluster_id, host, port):
    """Task step: remove a node from its cluster and detach it in the DB.

    Refuses (raises ValueError) if the node still holds slots.  A socket
    error is treated as "node already gone"; a hiredis ProtocolError is
    swallowed only when it says the node is not in a cluster.

    Uses ``except ... as`` and ``str(e)`` instead of the Python-2-only
    ``except X, e`` / ``e.message`` forms (behavior unchanged on py2).
    """
    try:
        me = redistrib.command.list_nodes(host, port, host)[1]
        if len(me.assigned_slots) != 0:
            raise ValueError('node still holding slots')
        redistrib.command.quit_cluster(host, port)
    except SocketError as e:
        # Unreachable node: proceed to detach it from our records anyway.
        logging.exception(e)
        logging.info('Remove instance from cluster on exception')
    except ProtocolError as e:
        if NOT_IN_CLUSTER_MESSAGE not in str(e):
            raise

    remove_empty_cluster(cluster_id)
    n = get_node_by_host_port(host, port)
    if n is not None:
        n.assignee_id = None
        db.session.add(n)
    commit_session()
    return True
class TaskRunner(threading.Thread):
    """Thread that executes one step of a cluster task.

    Created by try_create_exec_thread_by_task once the task's lock has
    been bound to the step; on success the lock's step binding is
    cleared so the next poll can schedule the following step.
    """

    def __init__(self, app, task_id, step_id):
        threading.Thread.__init__(self)
        self.app = app          # Flask-style app supplying app_context()
        self.task_id = task_id  # ClusterTask primary key
        self.step_id = step_id  # TaskStep primary key to execute

    def run(self):
        with self.app.app_context():
            task = ClusterTask.query.get(self.task_id)
            if task is None:
                # not possible gonna happen
                return
            try:
                step = TaskStep.query.get(self.step_id)

                # check again the step haven't run yet
                if step.completion is not None:
                    return task.check_completed()

                logging.info('Execute step %d', step.id)
                if not step.execute(TASK_MAP):
                    task.fail('Step fails')
                    commit_session()
                    return
                # Step done: release the step binding on the lock so the
                # poller may bind and run the task's next step.
                lock = task.acquired_lock()
                lock.step = None
                db.session.add(lock)
                commit_session()
                task.check_completed()
            except (StandardError, SQLAlchemyError), e:
                # Record the failure on the task itself and mark it
                # completed so it is no longer scheduled.
                logging.exception(e)
                db.session.rollback()
                task.exec_error = traceback.format_exc()
                task.completion = datetime.now()
                db.session.add(task)
                commit_session()
class TaskPoller(threading.Thread):
    """Daemon thread that periodically scans for unfinished cluster
    tasks and spawns a TaskRunner for each one ready to execute."""

    def __init__(self, app, interval):
        """:param app: application object providing app_context()
        :param interval: seconds to sleep between polling rounds"""
        threading.Thread.__init__(self)
        self.daemon = True
        self.app = app
        self.interval = interval

    def _shot(self):
        # One polling round: start a runner for every undone task that
        # can currently acquire its cluster lock.
        for task in models.task.undone_tasks():
            runner = try_create_exec_thread_by_task(task, self.app)
            if runner is not None:
                runner.start()

    def run(self):
        while True:
            logging.debug('Run tasks')
            with self.app.app_context():
                try:
                    self._shot()
                except Exception as e:
                    # A failed round must not kill the daemon; log and
                    # keep polling.  Log the exception object itself:
                    # `e.message` is not defined for every exception
                    # type (and is gone in Python 3).
                    logging.error('Unexpected Error %s', e)
                    logging.exception(e)
            time.sleep(self.interval)
run(self): 21 | for node in self.nodes: 22 | node.collect_stats() 23 | 24 | CACHING_NODES = {} 25 | 26 | 27 | def _load_from(cls, app, nodes): 28 | def update_node_settings(node, file_settings): 29 | node.suppress_alert = file_settings.get('suppress_alert') 30 | node.balance_plan = file_settings.get('balance_plan') 31 | node.app = app 32 | 33 | r = [] 34 | for n in nodes: 35 | if (n['host'], n['port']) in CACHING_NODES: 36 | cache_node = CACHING_NODES[(n['host'], n['port'])] 37 | r.append(cache_node) 38 | update_node_settings(cache_node, n) 39 | continue 40 | loaded_node = cls.get_by(n['host'], n['port']) 41 | CACHING_NODES[(n['host'], n['port'])] = loaded_node 42 | update_node_settings(loaded_node, n) 43 | r.append(loaded_node) 44 | return r 45 | 46 | 47 | def save_polling_stat(nodes, proxies): 48 | nodes_ok = [] 49 | nodes_fail = [] 50 | proxies_ok = [] 51 | proxies_fail = [] 52 | 53 | for n in nodes: 54 | if n.details['stat']: 55 | nodes_ok.append(n.addr) 56 | else: 57 | nodes_fail.append(n.addr) 58 | 59 | for p in proxies: 60 | if p.details['stat']: 61 | proxies_ok.append(p.addr) 62 | else: 63 | proxies_fail.append(p.addr) 64 | 65 | db.session.add(PollingStat(nodes_ok, nodes_fail, proxies_ok, proxies_fail)) 66 | 67 | 68 | class NodeStatCollector(threading.Thread): 69 | def __init__(self, app, interval): 70 | threading.Thread.__init__(self) 71 | self.daemon = True 72 | self.app = app 73 | self.interval = interval 74 | 75 | def _shot(self): 76 | self.app.on_loop_begin() 77 | poll = self.app.polling_targets() 78 | nodes = _load_from(RedisNodeStatus, self.app, poll['nodes']) 79 | proxies = _load_from(ProxyStatus, self.app, poll['proxies']) 80 | # commit because `get_by` may create new nodes 81 | # to reattach session they must be persisted 82 | commit_session() 83 | 84 | all_nodes = nodes + proxies 85 | random.shuffle(all_nodes) 86 | pollers = [Poller(all_nodes[i: i + NODES_EACH_THREAD]) 87 | for i in xrange(0, len(all_nodes), NODES_EACH_THREAD)] 88 | for p in 
def get_repository():
    """Return the path of the sqlalchemy-migrate repository: the
    `dbver` directory next to the entry script (sys.path[0] is the
    directory containing the script that launched Python).

    Uses os.path.join instead of manual `+ os.sep +` concatenation so a
    trailing separator in sys.path[0] cannot produce a doubled
    separator.
    """
    return os.path.join(sys.path[0], "dbver")
def migrate_db(version=None):
    """Migrate the database to `version` (int or int-like string).

    With no version, upgrade to the latest.  With a version lower than
    the current one, downgrade.  Prints the resulting schema version.
    """
    if version is not None:
        try:
            version = int(version)
        except ValueError as exception:
            # Not a number: surface the error to the caller.
            raise exception

    # Current schema version; also bootstraps version control on an
    # empty database (see db_version above).
    current_version = db_version()
    print "cur ver: " + str(current_version)

    repository = get_repository()
    # None means "latest", which versioning_api.upgrade accepts.
    if version is None or version > current_version:
        versioning_api.upgrade(get_engine(), repository, version)
    else:
        versioning_api.downgrade(get_engine(), repository, version)
    newver = db_version()
    if newver > current_version:
        print "upgrade to ver: " + str(db_version())
    elif newver < current_version:
        print "downgrade to ver: " + str(db_version())
    else:
        print "nothing need to change"
2 | 3 | More information at 4 | http://code.google.com/p/sqlalchemy-migrate/ 5 | -------------------------------------------------------------------------------- /dbver/__init__.py: -------------------------------------------------------------------------------- 1 | # template repository default module 2 | -------------------------------------------------------------------------------- /dbver/manage.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from migrate.versioning.shell import main 3 | 4 | if __name__ == '__main__': 5 | main(debug='False') 6 | -------------------------------------------------------------------------------- /dbver/migrate.cfg: -------------------------------------------------------------------------------- 1 | [db_settings] 2 | # Used to identify which repository this database is versioned under. 3 | # You can use the name of your project. 4 | repository_id=redisctl 5 | 6 | # The name of the database table used to track the schema version. 7 | # This name shouldn't already be used by your project. 8 | # If this is changed once a database is under version control, you'll need to 9 | # change the table name in each database too. 10 | version_table=dbver 11 | 12 | # When committing a change script, Migrate will attempt to generate the 13 | # sql for all supported databases; normally, if one of them fails - probably 14 | # because you don't have that database installed - it is ignored and the 15 | # commit continues, perhaps ending successfully. 16 | # Databases in this list MUST compile successfully during a commit, or the 17 | # entire commit will fail. List the databases your application will actually 18 | # be using to ensure your updates to that database work properly. 19 | # This must be a list; example: ['postgres','sqlite'] 20 | required_dbs=['mysql'] 21 | 22 | # When creating new change scripts, Migrate will stamp the new script with 23 | # a version number. 
def upgrade(migrate_engine):
    """Add the `proxy_type` column to the `proxy` table.

    Server default "0" corresponds to the Cerberus proxy type
    (TYPE_CERBERUS in models/proxy.py).
    """
    meta = MetaData(bind=migrate_engine)
    # Reflect the live table so the new column can be appended to it.
    proxy = Table('proxy', meta, autoload=True)
    # NOTE(review): TINYINT is imported from the *mssql* dialect at the
    # top of this file, yet migrate.cfg lists required_dbs=['mysql'] --
    # confirm this renders as the intended MySQL TINYINT.
    proxy_type_column = Column('proxy_type', TINYINT, nullable=False, server_default="0")
    proxy.create_column(proxy_type_column)
def main():
    """Build the application, register routes, publish the initial
    polling-target list, then serve HTTP.

    Binds to loopback only in debug mode; to all interfaces otherwise.
    """
    app = config.App(config)
    app.register_blueprints()
    app.write_polling_targets()
    bind_host = '127.0.0.1' if app.debug else '0.0.0.0'
    app.run(host=bind_host, port=config.SERVER_PORT)
def commit_session():
    """Commit the shared SQLAlchemy session.

    On any failure the session is rolled back before the exception is
    re-raised, so it remains usable for subsequent work instead of
    being stuck in a failed-transaction state.
    """
    try:
        db.session.commit()
    except Exception:
        db.session.rollback()
        raise
    def get_or_create_balance_plan(self):
        """Return this cluster's ClusterBalancePlan, creating and
        persisting an empty one (no pod/entrypoint, no slaves) if the
        cluster has none yet."""
        # Imported here (not at module top) to avoid a circular import
        # between cluster.py and cluster_plan.py.
        from cluster_plan import ClusterBalancePlan
        p = db.session.query(ClusterBalancePlan).filter(
            ClusterBalancePlan.cluster_id == self.id).first()
        if p is None:
            p = ClusterBalancePlan(cluster_id=self.id)
            p.balance_plan = {
                'pod': None,
                'entrypoint': None,
                'slaves': [],
            }
            # save() serializes balance_plan to JSON and flushes.
            p.save()
        return p
def remove_empty_cluster(cluster_id):
    """Delete the cluster if it no longer holds any Redis node.

    Tolerates a cluster id that no longer exists (e.g. two quit tasks
    racing to remove the same cluster): a missing cluster is treated as
    already removed instead of raising AttributeError on None.
    """
    c = get_by_id(cluster_id)
    if c is None:
        return
    if len(c.nodes) == 0:
        logging.info('Remove cluster %d', cluster_id)
        # Drop the dependent balance plan first, then the cluster row.
        c.del_balance_plan()
        db.session.delete(c)
        commit_session()
def del_redis_image(id):
    # Delete the container image row by primary key; caller is expected
    # to commit the session.
    # NOTE(review): the parameter shadows the builtin `id`; kept as-is
    # because the name is part of the caller-visible interface.
    db.session.delete(ContainerImage.query.get(id))
def create_instance(host, port):
    """Return the RedisNode registered at (host, port), creating and
    flushing a new row when none exists yet.

    Fixes the previous behaviour, which always returned a freshly
    constructed object even when the address was already registered,
    handing callers a transient duplicate with no id and leaving the
    existing row untouched.
    """
    node = get_by_host_port(host, port)
    if node is None:
        node = RedisNode(host=host, port=port)
        db.session.add(node)
        db.session.flush()
    return node
    def __init__(self, nodes_ok, nodes_fail, proxies_ok, proxies_fail):
        """Snapshot one polling round.

        Each argument is a list of address strings (as collected by the
        polling daemon); all four lists are serialized together into
        the stat_json column.  polling_time defaults to now.
        """
        Base.__init__(self, stat_json=json.dumps({
            'nodes_ok': nodes_ok,
            'nodes_fail': nodes_fail,
            'proxies_ok': proxies_ok,
            'proxies_fail': proxies_fail,
        }))
cluster_id = db.Column(db.ForeignKey(Cluster.id), index=True) 16 | suppress_alert = db.Column(db.Integer, nullable=False, default=1) 17 | proxy_type = db.Column(db.Integer, nullable=False, default=0) 18 | 19 | __table_args__ = (db.Index('address', 'host', 'port', unique=True),) 20 | 21 | @cached_property 22 | def containerized(self): 23 | return self.eru_container_id is not None 24 | 25 | @cached_property 26 | def container_info(self): 27 | from flask import g 28 | if g.container_client is None or not self.containerized: 29 | return None 30 | return g.container_client.get_container(self.eru_container_id) 31 | 32 | @cached_property 33 | def cluster(self): 34 | return Cluster.query.get(self.cluster_id) 35 | 36 | def proxy_typename(self): 37 | if self.proxy_type == TYPE_CERBERUS: 38 | return 'Cerberus' 39 | elif self.proxy_type == TYPE_CORVUS: 40 | return 'Corvus' 41 | else: 42 | return 'Unknow' 43 | 44 | 45 | def get_by_host_port(host, port): 46 | return db.session.query(Proxy).filter( 47 | Proxy.host == host, Proxy.port == port).first() 48 | 49 | 50 | def del_by_host_port(host, port): 51 | return db.session.query(Proxy).filter( 52 | Proxy.host == host, Proxy.port == port).delete() 53 | 54 | 55 | def get_or_create(host, port, cluster_id=None, proxy_type=TYPE_CERBERUS): 56 | p = db.session.query(Proxy).filter( 57 | Proxy.host == host, Proxy.port == port).first() 58 | if p is None: 59 | p = Proxy(host=host, port=port, proxy_type=proxy_type, cluster_id=cluster_id) 60 | db.session.add(p) 61 | db.session.flush() 62 | return p 63 | 64 | 65 | def create_eru_instance(host, port, cluster_id, eru_container_id): 66 | node = Proxy(host=host, port=port, eru_container_id=eru_container_id, 67 | cluster_id=cluster_id) 68 | db.session.add(node) 69 | db.session.flush() 70 | return node 71 | 72 | 73 | def delete_eru_instance(eru_container_id): 74 | db.session.query(Proxy).filter( 75 | Proxy.eru_container_id == eru_container_id).delete() 76 | 77 | 78 | def 
    def init(self):
        # (Re)set transient, non-column attributes.  Called from
        # __init__ and again in get_by after loading an existing row,
        # because nothing set here is persisted by the ORM.
        self.suppress_alert = 1
        self.details = {}
        self.app = None
        self.typename = ''
        # addr is expected as "host:port"; anything else leaves
        # host/port as None.
        # NOTE(review): a plain split(':') would mis-handle IPv6
        # addresses containing multiple colons -- confirm addrs are
        # IPv4/hostname only.
        if len(self.addr) > 0 and self.addr.find(':') > 0:
            self.host, port = self.addr.split(':')
            self.port = int(port)
        else:
            self.host = None
            self.port = None
        self.details['host'] = self.host
        self.details['port'] = self.port
    def collect_stats(self):
        """Poll this endpoint once and publish its stats via the app.

        Expected polling failures (network, parse, lookup, redis
        protocol errors) are swallowed deliberately: the node is marked
        unavailable and an alarm is sent instead of propagating, so one
        bad endpoint cannot abort the polling round.
        """
        try:
            self._collect_stats()
            self.app.stats_write(self.addr, self.stats_data())
        except (IOError, ValueError, LookupError, ReplyError) as e:
            logging.exception(e)
            self.set_unavailable()
            self.send_alarm(
                '%s failed: %s:%d - %s' % (
                    self.typename, self.host, self.port, e), e)
= 'Cerberus' 121 | 122 | def get_endpoint(self):  # proxy model matching this host:port; used as the alarm endpoint 123 | return models.proxy.get_by_host_port(self.host, self.port) 124 | -------------------------------------------------------------------------------- /override_config.py.example: -------------------------------------------------------------------------------- 1 | import os 2 | import logging 3 | 4 | SERVER_PORT = 5000 5 | 6 | MYSQL_HOST = '127.0.0.1' 7 | MYSQL_PORT = 3306 8 | MYSQL_USERNAME = 'root' 9 | MYSQL_PASSWORD = '123456' 10 | MYSQL_DATABASE = 'redisctl' 11 | 12 | LOG_LEVEL = logging.DEBUG 13 | 14 | DEBUG = 1 15 | POLL_INTERVAL = 10 16 | 17 | TEST_SQLALCHEMY_DATABASE_URI = 'mysql://user:pass@127.0.0.1:3306/redisctltest' 18 | 19 | OPEN_FALCON = None 20 | -------------------------------------------------------------------------------- /release_task_lock.py: -------------------------------------------------------------------------------- 1 | import config 2 | import models.base 3 | from models.task import TaskLock 4 | 5 | 6 | def main():  # force-release every TaskLock, completing any in-flight step first 7 | app = config.App(config) 8 | with app.app_context(): 9 | for lock in models.base.db.session.query(TaskLock).all(): 10 | if lock.step is not None: 11 | lock.step.complete('Force release lock') 12 | models.base.db.session.delete(lock) 13 | models.base.db.session.commit() 14 | 15 | if __name__ == '__main__': 16 | main() 17 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | Flask==0.10.1 2 | Flask-SQLAlchemy==2.1 3 | gevent>=1.1.1 4 | greenlet>=0.4.9 5 | hiredis==0.2.0 6 | itsdangerous==0.24 7 | Jinja2==2.8 8 | MarkupSafe==0.23 9 | MySQL-python==1.2.5 10 | redis-trib>=0.5.0 11 | retrying==1.3.3 12 | six==1.9.0 13 | SQLAlchemy>=1.0.13 14 | sqlalchemy-migrate>=0.9.0 15 | Werkzeug>=0.11.10 16 | wheel>=0.24.0 17 | requests>=2.4.3 18 | -------------------------------------------------------------------------------- /static/css/common.css:
-------------------------------------------------------------------------------- 1 | header { 2 | color: #bbb; 3 | padding: 8px; 4 | min-height: 24px; 5 | line-height: 22px; 6 | font-size: 16px; 7 | margin-bottom: 10px; 8 | } 9 | .nav-link { 10 | color: #68d098; 11 | } 12 | .nav-link:hover { 13 | color: #4cc; 14 | text-decoration: none; 15 | } 16 | 17 | footer { 18 | background-color: #eee; 19 | color: #666; 20 | min-height: 32px; 21 | line-height: 28px; 22 | font-size: 16px; 23 | margin-top: 10px; 24 | border-top: 1px dashed #aaa; 25 | } 26 | 27 | .no-stats-mode .stats-display { display: none } 28 | .no-alarm-mode .alarm-display { display: none } 29 | .no-container-mode .container-display { display: none } 30 | .no-adv-user-mode .adv-user-display { display: none } 31 | 32 | .table caption { 33 | font-size: 2em; 34 | font-weight: bold; 35 | } 36 | 37 | th { 38 | padding: 4px; 39 | } 40 | 41 | hr { 42 | clear: both; 43 | } 44 | 45 | .selected { 46 | background-color: #fc8; 47 | } 48 | 49 | .label-cell { 50 | min-width: 72px; 51 | padding-right: 2em; 52 | } 53 | 54 | .master {color: #044} 55 | .slave {color: #440} 56 | -------------------------------------------------------------------------------- /static/fonts/digital/ds-digib-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/projecteru/redis-ctl/16ae59b6dfe3d62ecb59951bd81395c370b005ef/static/fonts/digital/ds-digib-webfont.eot -------------------------------------------------------------------------------- /static/fonts/digital/ds-digib-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/projecteru/redis-ctl/16ae59b6dfe3d62ecb59951bd81395c370b005ef/static/fonts/digital/ds-digib-webfont.woff -------------------------------------------------------------------------------- /static/fonts/glyphicons/flat-ui-icons-regular.woff: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/projecteru/redis-ctl/16ae59b6dfe3d62ecb59951bd81395c370b005ef/static/fonts/glyphicons/flat-ui-icons-regular.woff -------------------------------------------------------------------------------- /static/fonts/lato/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/projecteru/redis-ctl/16ae59b6dfe3d62ecb59951bd81395c370b005ef/static/fonts/lato/lato-bold.woff -------------------------------------------------------------------------------- /static/fonts/lato/lato-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/projecteru/redis-ctl/16ae59b6dfe3d62ecb59951bd81395c370b005ef/static/fonts/lato/lato-regular.woff -------------------------------------------------------------------------------- /static/js/application.js: -------------------------------------------------------------------------------- 1 | // Some general UI pack related JS 2 | // Extend JS String with repeat method 3 | String.prototype.repeat = function (num) { 4 | return new Array(Math.round(num) + 1).join(this); 5 | }; 6 | 7 | (function ($) { 8 | 9 | // Add segments to a slider 10 | $.fn.addSliderSegments = function () { 11 | return this.each(function () { 12 | var $this = $(this), 13 | option = $this.slider('option'), 14 | amount = (option.max - option.min)/option.step, 15 | orientation = option.orientation; 16 | if ( 'vertical' === orientation ) { 17 | var output = '', i; 18 | for (i = 1; i <= amount - 1; i++) { 19 | output += '
'; 20 | } 21 | $this.prepend(output); 22 | } else { 23 | var segmentGap = 100 / (amount) + '%'; 24 | var segment = '
'; 25 | $this.prepend(segment.repeat(amount - 1)); 26 | } 27 | }); 28 | }; 29 | 30 | $(function () { 31 | 32 | // Todo list 33 | $('.todo').on('click', 'li', function () { 34 | $(this).toggleClass('todo-done'); 35 | }); 36 | 37 | // Custom Selects 38 | if ($('[data-toggle="select"]').length) { 39 | $('[data-toggle="select"]').select2(); 40 | } 41 | 42 | // Checkboxes and Radio buttons 43 | $('[data-toggle="checkbox"]').radiocheck(); 44 | $('[data-toggle="radio"]').radiocheck(); 45 | 46 | // Tooltips 47 | $('[data-toggle=tooltip]').tooltip('show'); 48 | 49 | // jQuery UI Sliders 50 | var $slider = $('#slider'); 51 | if ($slider.length > 0) { 52 | $slider.slider({ 53 | max: 15, 54 | step: 6, 55 | value: 3, 56 | orientation: 'horizontal', 57 | range: 'min' 58 | }).addSliderSegments(); 59 | } 60 | 61 | var $verticalSlider = $('#vertical-slider'); 62 | if ($verticalSlider.length) { 63 | $verticalSlider.slider({ 64 | min: 1, 65 | max: 5, 66 | value: 3, 67 | orientation: 'vertical', 68 | range: 'min' 69 | }).addSliderSegments($verticalSlider.slider('option').max, 'vertical'); 70 | } 71 | 72 | // Focus state for append/prepend inputs 73 | $('.input-group').on('focus', '.form-control', function () { 74 | $(this).closest('.input-group, .form-group').addClass('focus'); 75 | }).on('blur', '.form-control', function () { 76 | $(this).closest('.input-group, .form-group').removeClass('focus'); 77 | }); 78 | 79 | // Make pagination demo work 80 | $('.pagination').on('click', 'a', function () { 81 | $(this).parent().siblings('li').removeClass('active').end().addClass('active'); 82 | }); 83 | 84 | $('.btn-group').on('click', 'a', function () { 85 | $(this).siblings().removeClass('active').end().addClass('active'); 86 | }); 87 | 88 | // Disable link clicks to prevent page scrolling 89 | $(document).on('click', 'a[href="#fakelink"]', function (e) { 90 | e.preventDefault(); 91 | }); 92 | 93 | // Switches 94 | if ($('[data-toggle="switch"]').length) { 95 | 
$('[data-toggle="switch"]').bootstrapSwitch(); 96 | } 97 | 98 | // Typeahead 99 | if ($('#typeahead-demo-01').length) { 100 | var states = new Bloodhound({ 101 | datumTokenizer: function (d) { return Bloodhound.tokenizers.whitespace(d.word); }, 102 | queryTokenizer: Bloodhound.tokenizers.whitespace, 103 | limit: 4, 104 | local: [ 105 | { word: 'Alabama' }, 106 | { word: 'Alaska' }, 107 | { word: 'Arizona' }, 108 | { word: 'Arkansas' }, 109 | { word: 'California' }, 110 | { word: 'Colorado' } 111 | ] 112 | }); 113 | 114 | states.initialize(); 115 | 116 | $('#typeahead-demo-01').typeahead(null, { 117 | name: 'states', 118 | displayKey: 'word', 119 | source: states.ttAdapter() 120 | }); 121 | } 122 | 123 | // make code pretty 124 | window.prettyPrint && prettyPrint(); 125 | 126 | }); 127 | 128 | })(jQuery); 129 | -------------------------------------------------------------------------------- /static/js/common.js: -------------------------------------------------------------------------------- 1 | $(document).ready(function() { 2 | $('.check-suppress-alert').enableLabelCheck({ 3 | checkedClass: 'bell-slash-o', 4 | uncheckedClass: 'bell', 5 | onClick: function(self) { 6 | $.post('/set_alarm/' + self.data('ntype'), { 7 | host: self.data('host'), 8 | port: self.data('port'), 9 | suppress: self.prop('checked') ? 
'1' : '0' 10 | }); 11 | } 12 | }); 13 | $('.delete-proxy-btn').click(function() { 14 | var btn = $(this); 15 | btn.attr('disabled', 'disabled').text(_('Please wait')); 16 | $.ajax({ 17 | url: '/cluster/delete_proxy', 18 | type: 'POST', 19 | data: { 20 | host: btn.data('host'), 21 | port: btn.data('port') 22 | }, 23 | success: function() { 24 | btn.text(_('Proxy unregistered')); 25 | }, 26 | error: function(e) { 27 | btn.text(_('failed') + ': ' + e.responseText); 28 | } 29 | }); 30 | }) 31 | 32 | $('.panel-heading-hide-content').click(function() { 33 | $(this).next().slideToggle(); 34 | }); 35 | }); 36 | 37 | window.TRANSLATIONS = window.TRANSLATIONS || {}; 38 | window._ = function(text) { 39 | return window.TRANSLATIONS[text] || text; 40 | }; 41 | -------------------------------------------------------------------------------- /static/js/containerize.js: -------------------------------------------------------------------------------- 1 | function delContainer() { 2 | if (!confirm(_('This container will be removed, are you sure?'))) { 3 | return; 4 | } 5 | var self = $(this).attr('disabled', 'disabled'); 6 | $.ajax({ 7 | url: '/containerize/remove', 8 | method: 'POST', 9 | data: { 10 | id: self.data('cid'), 11 | type: self.data('type') 12 | }, 13 | success: function() { 14 | if (delContainer.deleted) { 15 | delContainer.deleted(self); 16 | } else { 17 | window.location.reload(); 18 | } 19 | }, 20 | error: function(e) { 21 | self.text(_('failed') + ':' + e.responseText); 22 | } 23 | }); 24 | } 25 | 26 | $(document).ready(function() { 27 | $('.btn-del-container').click(delContainer); 28 | $('.btn-revive-container').click(function () { 29 | var self = $(this).attr('disabled', 'disabled'); 30 | $.ajax({ 31 | url: '/containerize/revive', 32 | method: 'POST', 33 | data: {id: self.data('cid')}, 34 | success: function() { 35 | window.location.reload(); 36 | }, 37 | error: function(e) { 38 | self.text(_('failed') + ':' + e.responseText); 39 | } 40 | }); 41 | }); 42 | }); 43 
| -------------------------------------------------------------------------------- /static/js/jquery.localize.min.js: -------------------------------------------------------------------------------- 1 | /*! Localize - v0.1.0 - 2015-08-21 2 | * https://github.com/coderifous/jquery-localize 3 | * Copyright (c) 2015 coderifous; Licensed MIT */ 4 | !function(a){var b;return b=function(a){return a=a.replace(/_/,"-").toLowerCase(),a.length>3&&(a=a.substring(0,3)+a.substring(3).toUpperCase()),a},a.defaultLanguage=b(navigator.languages?navigator.languages[0]:navigator.language||navigator.userLanguage),a.localize=function(c,d){var e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u;return null==d&&(d={}),u=this,g={},f=d.fileExtension||"json",j=function(a,b,c){var e;switch(null==c&&(c=1),c){case 1:return g={},d.loadBase?(e=a+("."+f),h(e,a,b,c)):j(a,b,2);case 2:if(b.length>=2)return e=""+a+"-"+b.substring(0,2)+"."+f,h(e,a,b,c);break;case 3:if(b.length>=5)return e=""+a+"-"+b.substring(0,5)+"."+f,h(e,a,b,c)}},h=function(b,c,e,f){var h,i,k;return null!=d.pathPrefix&&(b=""+d.pathPrefix+"/"+b),k=function(b){return a.extend(g,b),p(g),j(c,e,f+1)},i=function(){return d.fallback&&d.fallback!==e?j(c,d.fallback):void 0},h={url:b,dataType:"json",async:!1,timeout:null!=d.timeout?d.timeout:500,success:k,error:i},"file:"===window.location.protocol&&(h.error=function(b){return k(a.parseJSON(b.responseText))}),a.ajax(h)},p=function(a){return null!=d.callback?d.callback(a,e):e(a)},e=function(b){return a.localize.data[c]=b,u.each(function(){var c,d,e;return c=a(this),d=c.data("localize"),d||(d=c.attr("rel").match(/localize\[(.*?)\]/)[1]),e=t(d,b),null!=e?k(c,d,e):void 0})},k=function(b,c,d){return b.is("input")?n(b,c,d):b.is("textarea")?n(b,c,d):b.is("img")?m(b,c,d):b.is("optgroup")?o(b,c,d):a.isPlainObject(d)||b.html(d),a.isPlainObject(d)?l(b,d):void 0},n=function(b,c,d){var e;return e=a.isPlainObject(d)?d.value:d,b.is("[placeholder]")?b.attr("placeholder",e):b.val(e)},l=function(a,b){return 
r(a,"title",b),r(a,"href",b),s(a,"text",b)},o=function(a,b,c){return a.attr("label",c)},m=function(a,b,c){return r(a,"alt",c),r(a,"src",c)},t=function(a,b){var c,d,e,f;for(c=a.split(/\./),d=b,e=0,f=c.length;f>e;e++)a=c[e],d=null!=d?d[a]:null;return d},r=function(a,b,c){return c=t(b,c),null!=c?a.attr(b,c):void 0},s=function(a,b,c){return c=t(b,c),null!=c?a.text(c):void 0},q=function(a){var b;return"string"==typeof a?"^"+a+"$":null!=a.length?function(){var c,d,e;for(e=[],c=0,d=a.length;d>c;c++)b=a[c],e.push(q(b));return e}().join("|"):a},i=b(d.language?d.language:a.defaultLanguage),d.skipLanguage&&i.match(q(d.skipLanguage))||j(c,i,1),u},a.fn.localize=a.localize,a.localize.data={}}(jQuery); -------------------------------------------------------------------------------- /static/js/prune.js: -------------------------------------------------------------------------------- 1 | function prune(self, url, data) { 2 | data = data || {}; 3 | data.id = self.data('id'); 4 | self.attr('disabled', 'disabled'); 5 | $.ajax({ 6 | type: 'POST', 7 | url: url, 8 | data: data, 9 | success: function() { self.text(_('done')); }, 10 | error: function(e) { 11 | console.error(e); 12 | alert(_('failed')); 13 | } 14 | }); 15 | } 16 | -------------------------------------------------------------------------------- /static/js/redis_node.js: -------------------------------------------------------------------------------- 1 | $(document).ready(function() { 2 | $('.fix-migrating-btn').click(taskFixMigrating); 3 | $('.node-deleter').click(function() { 4 | var btn = $(this); 5 | $.post('/redis/del', { 6 | host: btn.data('host'), 7 | port: btn.data('port') 8 | }, function() { 9 | btn.parent().html(_('Redis unregistered')); 10 | $('button,.panel-div').remove(); 11 | }); 12 | }); 13 | }); 14 | 15 | function cmdInfo(host, port, onText, onBlocks, onError) { 16 | $.get('/cmd/info', {host: host, port: port}, function(r) { 17 | onText(r); 18 | var lines = r.split('\n'); 19 | var blocks = [{items: []}]; 20 | 
for (var i = 0; i < lines.length; ++i) { 21 | var ln = lines[i].trim(); 22 | if (!ln) { 23 | continue; 24 | } 25 | if (ln[0] === '#') { 26 | blocks.push({ 27 | title: ln.slice(1).trim(), 28 | items: [] 29 | }); 30 | continue; 31 | } 32 | var k = ln.indexOf(':'); 33 | if (-1 === k) { 34 | continue; 35 | } 36 | blocks[blocks.length - 1].items.push({ 37 | key: ln.slice(0, k), 38 | value: ln.slice(k + 1) 39 | }); 40 | } 41 | onBlocks(blocks); 42 | }).error(onError); 43 | } 44 | 45 | function cmdGetMaxMemory(host, port, onMaxMemory, onNotSet, onError) { 46 | $.get('/cmd/get_max_mem', {host: host, port: port}, function(r) { 47 | var m = parseInt(r[1]); 48 | if (m === 0) { 49 | return onNotSet(); 50 | } 51 | onMaxMemory(m); 52 | }).error(onError); 53 | } 54 | 55 | function sortNodeByAddr(a, b) { 56 | if (a.host == b.host) { 57 | return a.port - b.port; 58 | } 59 | return a.host < b.host ? -1 : 1; 60 | } 61 | 62 | function parseClusterNodes(text) { 63 | function parseOne(parts) { 64 | if (parts.length < 8) { 65 | return; 66 | } 67 | var myself = false; 68 | var flags = parts[2].split(','); 69 | for (var j = 0; j < flags.length; ++j) { 70 | if (flags[j] === 'handshake') { 71 | return; 72 | } 73 | if (flags[j] === 'myself') { 74 | myself = true; 75 | } 76 | } 77 | var host_port = parts[1].split(':'); 78 | var slots = []; 79 | var migrating = false; 80 | for (j = 8; j < parts.length; ++j) { 81 | if (parts[j][0] == '[') { 82 | migrating = true; 83 | continue; 84 | } 85 | var slots_range = parts[j].split('-'); 86 | if (slots_range.length === 1) { 87 | slots.push(parseInt(slots_range[0])); 88 | continue; 89 | } 90 | for (var s = parseInt(slots_range[0]); s <= parseInt(slots_range[1]); ++s) { 91 | slots.push(s); 92 | } 93 | } 94 | return { 95 | node_id: parts[0], 96 | address: parts[1], 97 | host: host_port[0], 98 | port: parseInt(host_port[1]), 99 | flags: parts[2].split(','), 100 | myself: myself, 101 | slave: parts[3] !== '-', 102 | master_id: parts[3] === '-' ? 
null : parts[3], 103 | migrating: migrating, 104 | slots: slots, 105 | slots_text: parts.slice(8).join(' '), 106 | stat: true 107 | }; 108 | } 109 | 110 | var lines = text.split('\n'); 111 | var parts; 112 | var nodes = []; 113 | for (var i = 0; i < lines.length; ++i) { 114 | if (!lines[i]) { 115 | continue; 116 | } 117 | var n = parseOne(lines[i].split(' ')); 118 | if (n) { 119 | nodes.push(n); 120 | } 121 | } 122 | return nodes; 123 | } 124 | 125 | function rolesLabels(roles) { 126 | return roles.map(function(e) { 127 | var color = null; 128 | var text = e; 129 | switch(e) { 130 | case 'myself': 131 | color = 'default'; 132 | text = _('myself'); 133 | break; 134 | case 'master': 135 | color = 'primary'; 136 | text = _('master'); 137 | break; 138 | case 'slave': 139 | color = 'warning'; 140 | text = _('slave'); 141 | break; 142 | case 'fail': 143 | color = 'danger'; 144 | text = 'F'; 145 | break; 146 | } 147 | return bscp.label(text, color); 148 | }); 149 | } 150 | 151 | function redisRelations(redisList) { 152 | var masters = {}; 153 | var allMasters = []; 154 | var slaves = []; 155 | 156 | $.each(redisList, function(i, n) { 157 | if (n.slave) { 158 | return slaves.push(n); 159 | } 160 | allMasters.push(n); 161 | n.slaves = []; 162 | if (n.node_id) { 163 | return masters[n.node_id] = n; 164 | } 165 | }); 166 | 167 | $.each(slaves, function(i, n) { 168 | if (masters[n.master_id]) { 169 | return masters[n.master_id].slaves.push(n); 170 | } 171 | allMasters.push(n); 172 | }); 173 | 174 | allMasters.sort(sortNodeByAddr); 175 | return allMasters; 176 | } 177 | -------------------------------------------------------------------------------- /static/lib/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/projecteru/redis-ctl/16ae59b6dfe3d62ecb59951bd81395c370b005ef/static/lib/fonts/fontawesome-webfont.woff 
-------------------------------------------------------------------------------- /static/lib/js/d3/utils.js: -------------------------------------------------------------------------------- 1 | 2 | nv.utils.windowSize = function() { 3 | // Sane defaults 4 | var size = {width: 640, height: 480}; 5 | 6 | // Earlier IE uses Doc.body 7 | if (document.body && document.body.offsetWidth) { 8 | size.width = document.body.offsetWidth; 9 | size.height = document.body.offsetHeight; 10 | } 11 | 12 | // IE can use depending on mode it is in 13 | if (document.compatMode=='CSS1Compat' && 14 | document.documentElement && 15 | document.documentElement.offsetWidth ) { 16 | size.width = document.documentElement.offsetWidth; 17 | size.height = document.documentElement.offsetHeight; 18 | } 19 | 20 | // Most recent browsers use 21 | if (window.innerWidth && window.innerHeight) { 22 | size.width = window.innerWidth; 23 | size.height = window.innerHeight; 24 | } 25 | return (size); 26 | }; 27 | 28 | 29 | 30 | // Easy way to bind multiple functions to window.onresize 31 | // TODO: give a way to remove a function after its bound, other than removing all of them 32 | nv.utils.windowResize = function(fun){ 33 | if (fun === undefined) return; 34 | var oldresize = window.onresize; 35 | 36 | window.onresize = function(e) { 37 | if (typeof oldresize == 'function') oldresize(e); 38 | fun(e); 39 | } 40 | } 41 | 42 | // Backwards compatible way to implement more d3-like coloring of graphs. 
43 | // If passed an array, wrap it in a function which implements the old default 44 | // behavior 45 | nv.utils.getColor = function(color) { 46 | if (!arguments.length) return nv.utils.defaultColor(); //if you pass in nothing, get default colors back 47 | 48 | if( Object.prototype.toString.call( color ) === '[object Array]' ) 49 | return function(d, i) { return d.color || color[i % color.length]; }; 50 | else 51 | return color; 52 | //can't really help it if someone passes rubbish as color 53 | } 54 | 55 | // Default color chooser uses the index of an object as before. 56 | nv.utils.defaultColor = function() { 57 | var colors = d3.scale.category20().range(); 58 | return function(d, i) { return d.color || colors[i % colors.length] }; 59 | } 60 | 61 | 62 | // Returns a color function that takes the result of 'getKey' for each series and 63 | // looks for a corresponding color from the dictionary, 64 | nv.utils.customTheme = function(dictionary, getKey, defaultColors) { 65 | getKey = getKey || function(series) { return series.key }; // use default series.key if getKey is undefined 66 | defaultColors = defaultColors || d3.scale.category20().range(); //default color function 67 | 68 | var defIndex = defaultColors.length; //current default color (going in reverse) 69 | 70 | return function(series, index) { 71 | var key = getKey(series); 72 | 73 | if (!defIndex) defIndex = defaultColors.length; //used all the default colors, start over 74 | 75 | if (typeof dictionary[key] !== "undefined") 76 | return (typeof dictionary[key] === "function") ? 
dictionary[key]() : dictionary[key]; 77 | else 78 | return defaultColors[--defIndex]; // no match in dictionary, use default color 79 | } 80 | } 81 | 82 | 83 | 84 | // From the PJAX example on d3js.org, while this is not really directly needed 85 | // it's a very cool method for doing pjax, I may expand upon it a little bit, 86 | // open to suggestions on anything that may be useful 87 | nv.utils.pjax = function(links, content) { 88 | d3.selectAll(links).on("click", function() { 89 | history.pushState(this.href, this.textContent, this.href); 90 | load(this.href); 91 | d3.event.preventDefault(); 92 | }); 93 | 94 | function load(href) { 95 | d3.html(href, function(fragment) { 96 | var target = d3.select(content).node(); 97 | target.parentNode.replaceChild(d3.select(fragment).select(content).node(), target); 98 | nv.utils.pjax(links, content); 99 | }); 100 | } 101 | 102 | d3.select(window).on("popstate", function() { 103 | if (d3.event.state) load(d3.event.state); 104 | }); 105 | } 106 | 107 | /* For situations where we want to approximate the width in pixels for an SVG:text element. 108 | Most common instance is when the element is in a display:none; container. 109 | Forumla is : text.length * font-size * constant_factor 110 | */ 111 | nv.utils.calcApproxTextWidth = function (svgTextElem) { 112 | if (typeof svgTextElem.style === 'function' 113 | && typeof svgTextElem.text === 'function') { 114 | var fontSize = parseInt(svgTextElem.style("font-size").replace("px","")); 115 | var textLength = svgTextElem.text().length; 116 | 117 | return textLength * fontSize * 0.5; 118 | } 119 | return 0; 120 | }; 121 | 122 | /* Numbers that are undefined, null or NaN, convert them to zeros. 
123 | */ 124 | nv.utils.NaNtoZero = function(n) { 125 | if (typeof n !== 'number' 126 | || isNaN(n) 127 | || n === null 128 | || n === Infinity) return 0; 129 | 130 | return n; 131 | }; 132 | 133 | /* 134 | Snippet of code you can insert into each nv.models.* to give you the ability to 135 | do things like: 136 | chart.options({ 137 | showXAxis: true, 138 | tooltips: true 139 | }); 140 | 141 | To enable in the chart: 142 | chart.options = nv.utils.optionsFunc.bind(chart); 143 | */ 144 | nv.utils.optionsFunc = function(args) { 145 | if (args) { 146 | d3.map(args).forEach((function(key,value) { 147 | if (typeof this[key] === "function") { 148 | this[key](value); 149 | } 150 | }).bind(this)); 151 | } 152 | return this; 153 | }; -------------------------------------------------------------------------------- /static/picture/retina.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /static/trans/-en.json: -------------------------------------------------------------------------------- 1 | { 2 | "master": "Master", 3 | "slave": "Slave", 4 | 5 | "time": "Time", 6 | "nodes-ok": "Normal nodes", 7 | "nodes-fail": "Failed nodes", 8 | "proxies-ok": "Normal proxies", 9 | "proxies-fail": "Failed proxies", 10 | 11 | "expnd-show-masters-only": "Master only", 12 | "expnd-sort": "Sort", 13 | "expnd-reverse": "Sort desc", 14 | 15 | "home": "First page", 16 | "previous-page": "Previous page", 17 | "back-to-cluster-panel": "Back to cluster panel", 18 | "next-page": "Next page", 19 | 20 | "need-autodiscover": "This Redis is not assigned to any cluster in DB, try Auto discover", 21 | "already-quit-message": "This Redis is not actually in the cluster. 
Quit anyway", 22 | "nodeMemSettingTitle": "Node memory setting title", 23 | 24 | "task-type-0": "Fix migrating", 25 | "task-type-1": "Migrate", 26 | "task-type-2": "Add master", 27 | "task-type-3": "Add slave", 28 | "task-type-4": "Remove Redis", 29 | "task-type-5": "Auto expanding", 30 | "task-type-6": "Batch ops", 31 | "task-type-7": "Launch", 32 | 33 | "event-type-0": "Create", 34 | "event-type-1": "Remove", 35 | "event-type-2": "Config", 36 | "event-type-3": "Command", 37 | 38 | "auto-expand-pod": "Pod for expanding", 39 | "machines-num": "Number of machines", 40 | "specifies-host-m": "Specifies host for master", 41 | "random-host": "Random", 42 | "with-slave": "With slave", 43 | "slave-node": "Slave node", 44 | "enable-aof": "Enable AOF", 45 | "confirm": "Confirm", 46 | "saved": "Saved", 47 | 48 | "": "" 49 | } 50 | -------------------------------------------------------------------------------- /static/trans/en.js: -------------------------------------------------------------------------------- 1 | window.TRANSLATIONS = { 2 | 'update': 'Update', 3 | 'delete': 'Delete', 4 | 'yes': 'Yes', 5 | 'no': 'No', 6 | 'failed': 'Failed', 7 | 'master': 'Master', 8 | 'slave': 'Slave', 9 | 'enabled': 'Enabled', 10 | 'disabled': 'Disabled', 11 | 12 | 'waiting for polling': 'WAITING FOR POLLING', 13 | 'serving': 'Serving', 14 | 'ready': 'Ready', 15 | 16 | 'task_step_launch': 'Launch', 17 | 'task_step_fix_migrate': 'Fix migrating', 18 | 'task_step_migrate': 'Migrate', 19 | 'task_step_join': 'Add master', 20 | 'task_step_replicate': 'Add slave', 21 | 'task_step_quit': 'Remove Redis', 22 | 'from': 'From', 23 | 'migrate_out': 'migrate out', 24 | 'slots_to': 'slot(s) to', 25 | 'add_slave': 'Add slave', 26 | 'to_master': 'to master', 27 | 'awaiting': 'Awaiting', 28 | 'processing': 'Processing', 29 | 'completed': 'Done', 30 | 31 | 'redis-containerize-port-range-invalid': 'Port should between 6000 and 7999', 32 | 'proxy-containerize-port-range-invalid': 'Port should between 8000 and 
9999', 33 | 'not reachable': 'Not reachable', 34 | 35 | 'stats-used_cpu_user': 'User', 36 | 'stats-used_cpu_sys': 'Sys', 37 | 'stats-used_memory': 'Memory', 38 | 'stats-used_memory_rss': 'RSS', 39 | 'stats-connected_clients': 'Clients', 40 | 'stats-total_commands_processed': 'QPS', 41 | 'stats-expired_keys': 'Expired keys', 42 | 'stats-evicted_keys': 'Evicted keys', 43 | 'stats-keyspace_hits': 'Keyspace hits', 44 | 'stats-keyspace_misses': 'Keyspace misses', 45 | 'stats-keys': 'Keys', 46 | 'stats-mem_buffer_alloc': 'Buffering memory', 47 | 'stats-completed_commands': 'QPS', 48 | 'stats-command_elapse': 'Total', 49 | 'stats-remote_cost': 'Remote', 50 | 51 | /* BEGIN containerization exception detail */ 52 | 'Not enough core resources': 'Insufficient resource in specified host', 53 | /* END */ 54 | 55 | '': '' 56 | }; 57 | -------------------------------------------------------------------------------- /static/trans/zh.js: -------------------------------------------------------------------------------- 1 | window.TRANSLATIONS = { 2 | 'update': '改', 3 | 'delete': '删', 4 | 'yes': '是', 5 | 'no': '否', 6 | 'enabled': '开启', 7 | 'disabled': '关闭', 8 | 'failed': '失败', 9 | 'done': '完成', 10 | 11 | 'master': '主', 12 | 'slave': '从', 13 | 14 | 'Please wait': '请稍候', 15 | 'unknown error': '未知错误', 16 | 17 | 'invalid port format': '不正确的端口范围格式', 18 | 'registering': '正在注册', 19 | 'waiting for polling': '尚未获取节点详细信息', 20 | 'Redis unregistered': '节点已被移除', 21 | 'Proxy unregistered': '代理已被移除', 22 | 'Registered': '已注册', 23 | 'Remove': '移除此节点', 24 | 'Not in a cluster': '节点不在集群中', 25 | 'Loading cluster nodes': '正在载入集群信息', 26 | 'serving': 'Serving', 27 | 'ready': 'Ready', 28 | 29 | 'task_step_launch': '创建集群', 30 | 'task_step_fix_migrate': '修复迁移状态', 31 | 'task_step_migrate': '迁移槽位', 32 | 'task_step_join': '添加主节点', 33 | 'task_step_replicate': '添加从节点', 34 | 'task_step_quit': '移除节点', 35 | 'from': '从', 36 | 'migrate_out': '迁移', 37 | 'slots_to': '个槽位至', 38 | 'add_slave': '从节点为', 39 | 'to_master': 
'主节点为', 40 | 'Wait cluster to shutdown': '正在验证数据已清空并关闭集群', 41 | 'Cluster is closed': '集群已经关闭', 42 | 'Please manually flush the data in this Redis before shutdown': '此节点中还有数据, 请手动清理后重试', 43 | 'The cluster contains other Redis. Use auto discover to add them': '集群中有其他节点, 请使用自动发现刷新节点列表', 44 | 'Address is required': '请输入一个地址', 45 | 'Invalid address': '地址格式不正确', 46 | 47 | 'Node ID': '节点 ID', 48 | 'Address': '地址', 49 | 'Role': '角色', 50 | 'Master ID': '主节点 ID', 51 | 'Status': '状态', 52 | 'Slots': '槽位', 53 | 54 | 'awaiting': '等待', 55 | 'processing': '正在执行', 56 | 'completed': '完成', 57 | 58 | 'Myself': '当前节点', 59 | 60 | 'An instance is already serving at the address': '该地址的节点或代理已经存在', 61 | 'redis-containerize-port-range-invalid': '端口范围应该在 6000-7999 之间', 62 | 'proxy-containerize-port-range-invalid': '端口范围应该在 8000-9999 之间', 63 | 'Number of threads not selected': '请选择线程数量', 64 | 'Number of CPU slices not selected': '请选择 CPU 分片数量', 65 | 'Offline': '下线容器', 66 | 'This container will be removed, are you sure?': '确定要下线此容器吗?', 67 | 'not reachable': '无法连接', 68 | 69 | 'stats-used_cpu_user': '用户态', 70 | 'stats-used_cpu_sys': '内核态', 71 | 'stats-used_memory': '内存', 72 | 'stats-used_memory_rss': 'RSS', 73 | 'stats-connected_clients': '客户端连接数', 74 | 'stats-total_commands_processed': '平均每秒指令数', 75 | 'stats-expired_keys': 'Expired keys', 76 | 'stats-evicted_keys': 'Evicted keys', 77 | 'stats-keyspace_hits': 'Keyspace hits', 78 | 'stats-keyspace_misses': 'Keyspace misses', 79 | 'stats-keys': 'Keys', 80 | 'stats-mem_buffer_alloc': '缓冲区', 81 | 'stats-completed_commands': '平均每秒指令数', 82 | 'stats-command_elapse': '总共', 83 | 'stats-remote_cost': '远端', 84 | 85 | /* 开始 容器化异常信息 */ 86 | 'Not enough core resources': '指定机器的资源不足', 87 | /* 结束 */ 88 | 89 | '': '' 90 | }; 91 | -------------------------------------------------------------------------------- /templates/audit/nodes.html: -------------------------------------------------------------------------------- 1 | {% extends './base.html' %} 2 | 3 | {% 
import './blocks/pager.html' as pager %} 4 | 5 | {% block title %}Audit{% endblock %} 6 | 7 | {% block body %} 8 | 9 | {% block pagination %} 10 | 26 |
27 | {% endblock %} 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | {% for e in events %} 43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 55 | 56 | {% endfor %} 57 | 58 |
#AddressDeployTypeArgsTimeUser
{{ e.id }}{{ e.host|e }}:{{ e.port }}{{ e.event_domain|e if e.event_domain else '-' }}{{ e.args_json|e }}{{ e.creation|strftime }} 51 | {% autoescape false %} 52 | {{ render_user(e.user_id) }} 53 | {% endautoescape %} 54 |
59 | 60 | {{ self.pagination() }} 61 | {% endblock %} 62 | -------------------------------------------------------------------------------- /templates/base.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | {% block title %}{% endblock %} 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | {% if lang or g.lang %} 18 | 19 | {% endif %} 20 | 40 | {% block head %}{% endblock %} 41 | 42 | {% set body_cls = body_classes() %} 43 | 44 |
45 |
90 | 91 |
{% block body %}{% endblock %}
92 | 93 |
94 |
95 | 96 | © HunanTV Platform developers - v0.9.2-2016-09-19 97 | 98 | 99 |
100 |
101 | 102 | 103 | -------------------------------------------------------------------------------- /templates/blocks/modal.html: -------------------------------------------------------------------------------- 1 | {% macro head(id, title) %} 2 | 17 | {%- endmacro %} 18 | 19 | {%- macro modal(id, title, titleLcl) %} 20 | 33 | {%- endmacro %} 34 | -------------------------------------------------------------------------------- /templates/blocks/pager.html: -------------------------------------------------------------------------------- 1 | {%- macro pager(page) %} 2 | {{- caller() }} 3 | 23 | 45 | {%- endmacro %} 46 | -------------------------------------------------------------------------------- /templates/cluster/create.html: -------------------------------------------------------------------------------- 1 | {% extends './base.html' %} 2 | 3 | {% block title %}Create Cluster{% endblock %} 4 | 5 | {% block head %} 6 | 7 | 8 | {% endblock %} 9 | 10 | {% block body %} 11 |
12 |
Create new cluster
13 |
14 |
15 |
16 | Description 17 |
18 |
19 | 20 |
21 |
22 |
23 |
24 | Select Redis instances: 25 |
26 |
27 |
28 |
29 |
30 | 31 |
32 |
33 |
34 |
35 | 36 | 37 | 75 | {% endblock %} 76 | -------------------------------------------------------------------------------- /templates/cluster/inactive.html: -------------------------------------------------------------------------------- 1 | {% extends './base.html' %} 2 | 3 | {% block title %}{{ cluster.description|e }} - Cluster Panel{% endblock %} 4 | 5 | {% block head %} 6 | 7 | {% endblock %} 8 | 9 | {% block body %} 10 |
11 |
12 |
13 |
Cluster ID
14 |
{{ cluster.id }}
15 |
16 | 17 |
18 |
Description
19 |
{{ cluster.description|e }}
20 |
21 | 22 |
23 |
Creation
24 |
{{ cluster.creation|strftime }}
25 |
26 |
27 |
28 | 29 |
30 |
31 | Proxy 32 | All proxies should be unregistered / removed before deleting the cluster 33 |
34 |
35 | {%- for proxy in cluster.proxies %} 36 |
37 |
38 | 39 | {%- if proxy.containerized %}{{ icon('cube') }}{% endif %} 40 |
41 | 42 | {%- if proxy.containerized %} 43 | {{ button('Remove Container', size=3, color='danger', cls=['btn-del-container', 'btn-block'], data={'type': 'proxy', 'cid': proxy.eru_container_id}, lcl='btn-remove-container') }} 44 | {%- else %} 45 | {{ button('Unregister', size=3, color='danger', cls=['delete-proxy-btn', 'btn-block'], data={'host': proxy.host, 'port': proxy.port}, lcl='btn-remove') }} 46 | {%- endif %} 47 |
48 | {% endfor %} 49 |
50 |
51 | 52 | {%- set tasks = cluster.get_tasks() %} 53 |
54 |
55 | Tasks 56 | All tasks should be pruned before deleting the cluster 57 | Prune 58 |
59 | {% autoescape false %} 60 | {{ render('components/cluster/tasks.html', tasks=tasks) }} 61 | {% endautoescape %} 62 |
63 | 64 | {% if cluster.proxies|length == 0 and tasks|length == 0 %} 65 |
66 |
67 |
68 | 69 |
70 |
71 |
72 | 73 | 87 | {% endif %} 88 | 89 | {% endblock %} 90 | -------------------------------------------------------------------------------- /templates/cluster/tasks.html: -------------------------------------------------------------------------------- 1 | {% extends './cluster/tasks_all.html' %} 2 | 3 | {% import './blocks/pager.html' as pager %} 4 | 5 | {% block title %}Cluster Task{% endblock %} 6 | 7 | {% block pagination %} 8 | 13 | {% endblock %} 14 | -------------------------------------------------------------------------------- /templates/cluster/tasks_all.html: -------------------------------------------------------------------------------- 1 | {% extends './base.html' %} 2 | 3 | {% import './blocks/pager.html' as pager %} 4 | 5 | {% block title %}All Tasks{% endblock %} 6 | 7 | {% block head %} 8 | 9 | {% endblock %} 10 | 11 | {% block body %} 12 | 13 | {% block pagination %} 14 | 30 |
31 | {% endblock %} 32 | 33 | {% autoescape false %} 34 | {{ render('components/cluster/tasks.html', tasks=tasks) }} 35 | {% endautoescape %} 36 | 37 | {{ self.pagination() }} 38 | 39 | {% endblock %} 40 | -------------------------------------------------------------------------------- /templates/components/button.html: -------------------------------------------------------------------------------- 1 | {% extends 'components/grid-group.html' %} 2 | 3 | {% block content %} 4 | 13 | {% endblock %} 14 | -------------------------------------------------------------------------------- /templates/components/checkbox.html: -------------------------------------------------------------------------------- 1 | {% extends 'components/grid-group.html' %} 2 | 3 | {% block content %} 4 | 12 | {% endblock %} 13 | -------------------------------------------------------------------------------- /templates/components/cluster/eru_auto_balance.html: -------------------------------------------------------------------------------- 1 |
2 |
3 |
4 | 用于自动申请节点的机房: 5 | 10 | 指定分配容器的机器 11 | 14 |
15 |
16 | 配备从节点 17 | 18 | {% set slave_count = plan.slaves|length if plan.slaves else 0 %} 19 | {{ slave_count }} 20 | 21 | {% for i in range(plan_max_slaves) %} 22 |
23 | 从节点 #{{ i }} 24 | 27 |
28 | {% endfor %} 29 |
30 | 31 |
32 | 33 |
34 | 35 | 36 | 37 |
38 |
39 | 40 | 136 | -------------------------------------------------------------------------------- /templates/components/cluster/export-nodes.html: -------------------------------------------------------------------------------- 1 | {% import 'blocks/modal.html' as modal %} 2 | 3 | {{ modal.head('exportNodes', '集群节点列表') }} 4 |
5 | {{ checkbox('只显示主节点', size=3, id='expnd-show-masters-only', lcl='expnd-show-masters-only') }} 6 | {{ button('按地址排序', size=2, id='expnd-sort', lcl='expnd-sort') }} 7 | {{ button('倒转顺序', size=2, id='expnd-reverse', lcl='expnd-reverse') }} 8 |
9 | 10 | {{ modal.tail() }} 11 | 12 | 43 | -------------------------------------------------------------------------------- /templates/components/cluster/nodes-add.html: -------------------------------------------------------------------------------- 1 | {% import 'blocks/modal.html' as modal %} 2 | 3 | {{ modal.head('nodesAdd', 'Batch Add Redis') }} 4 |
5 |
6 |
7 |
8 |
9 | 10 |
11 |
12 |
13 |
14 | {{ modal.tail() }} 15 | 16 | 45 | -------------------------------------------------------------------------------- /templates/components/cluster/row.html: -------------------------------------------------------------------------------- 1 | {% import './components/widgets.html' as widgets %} 2 | 3 | 4 | {{ cluster.id }} 5 | {{ cluster.description|e }} 6 | {{ cluster.nodes|length }} 7 | 8 | {% if cluster.proxies %} 9 |
10 | {% for proxy in cluster.proxies %} 11 |
12 | 17 |
18 | {% if proxy.detail.read_slave %}R{% endif %} 19 |
20 |
21 | {% if proxy.stat %} 22 | OK 23 | {% else %} 24 | E 25 | {% endif %} 26 |
27 |
28 | {% if proxy.suppress_alert %}{% endif %} 29 |
30 |
31 | {{ proxy.detail.version|e }} 32 |
33 |
34 | {{ widgets.stat_icon('proxy', proxy.host, proxy.port) }} 35 |
36 |
37 | {% endfor %} 38 |
39 | {% else %} 40 |
No Proxy
41 | {% endif %} 42 | 43 | 44 | -------------------------------------------------------------------------------- /templates/components/cluster/tasks.html: -------------------------------------------------------------------------------- 1 |
2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | {% for task in tasks %} 16 | 17 | 18 | 19 | 20 | 21 | 35 | 38 | 43 | 44 | {% endfor %} 45 | 46 |
#ClusterTypeCreation timeStatusDetailCreator
{{ task.id }}#{{ task.cluster_id }}{{ task.task_type }}{{ task.creation|strftime }} 22 | {% if task.completed %} 23 | {% if task.exec_error %} 24 | Failed 25 | {% else %} 26 | Done 27 | {% endif %} 28 | {{ task.completion|strftime }} 29 | {% elif task.running %} 30 | Running 31 | {% else %} 32 | Awaiting 33 | {% endif %} 34 | 36 | 37 | 39 | {% autoescape false %} 40 | {{ render_user(task.user_id) }} 41 | {% endautoescape %} 42 |
47 |
48 | 49 | 85 | -------------------------------------------------------------------------------- /templates/components/command_console.html: -------------------------------------------------------------------------------- 1 | {% import './blocks/modal.html' as modal %} 2 | 3 | {%- call modal.modal('commandConsole', 'Command Console', 'command-console') %} 4 |

  5 | 
  6 | {%- endcall %}
  7 | 
  8 | 
107 | 


--------------------------------------------------------------------------------
/templates/components/grid-group.html:
--------------------------------------------------------------------------------
1 | 
2 | {% block content %}{% endblock %} 3 |
4 | -------------------------------------------------------------------------------- /templates/components/hint.html: -------------------------------------------------------------------------------- 1 | {% extends 'components/grid-group.html' %} 2 | 3 | {% block content %} 4 | 9 | {{ text|e }} 10 | 11 | {% endblock %} 12 | -------------------------------------------------------------------------------- /templates/components/icon.html: -------------------------------------------------------------------------------- 1 | {% if color %}{% endif %} 2 | 3 | {% if color %}{% endif %} 4 | -------------------------------------------------------------------------------- /templates/components/input.html: -------------------------------------------------------------------------------- 1 | {% extends 'components/grid-group.html' %} 2 | 3 | {% block content %} 4 | {% if addon %} 5 |
6 | {{ addon }} 7 | {% endif %} 8 | 9 | 15 | 16 | {% if addon %} 17 |
18 | {% endif %} 19 | {% endblock %} 20 | -------------------------------------------------------------------------------- /templates/components/label.html: -------------------------------------------------------------------------------- 1 | {% extends 'components/grid-group.html' %} 2 | 3 | {% block content %} 4 |
5 | 10 | {{ text|e }} 11 | 12 |
13 | {% endblock %} 14 | -------------------------------------------------------------------------------- /templates/components/node/autodiscover.html: -------------------------------------------------------------------------------- 1 | {% import './blocks/modal.html' as modal %} 2 | 3 | {% call modal.modal('clusterAutoDiscover', 'Auto discover', 'btn-autodiscover') %} 4 | 5 |
6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 20 | 21 | 22 | 23 |
24 | {%- endcall %} 25 | 26 | 100 | -------------------------------------------------------------------------------- /templates/components/node/row.html: -------------------------------------------------------------------------------- 1 | {% import './components/widgets.html' as widgets %} 2 | 3 | 4 | {% set cluster = node.assignee %} 5 | {% set awaiting_polling = not node.stat and not node.detail.used_memory %} 6 | 7 | {{ node.host|e }}:{{ node.port }} 8 | {% if node.containerized %} 9 | 10 | {% endif %} 11 | 12 | 13 | {% if awaiting_polling %} 14 |

WAIT FOR POLLING

15 | {% else %} 16 | {{ node.detail.used_memory_human }} 17 | {% if node.detail.maxmemory %} 18 | / {{ node.detail.maxmemory|filesizeformat }} 19 | {{ (node.detail.used_memory * 100 / node.detail.maxmemory)|round(2) }}% 20 | {% else %} 21 | | maxmemory not set 22 | {% endif %} 23 | {% endif %} 24 | 25 | 26 | {% if awaiting_polling %} 27 |

WAIT FOR POLLING

28 | {% else %} 29 | {% if node.stat %} 30 | {% if cluster is none %} 31 | Ready 32 | {% else %} 33 | Serving 34 | | 35 | #{{ cluster.id }} 36 | | 37 | {% if node.detail.slave %} 38 | Slave 39 | {% else %} 40 | Master 41 | {% endif %} 42 | {% endif %} 43 | {% else %} 44 | 45 | {% endif %} 46 | 47 | {% if node.detail.slots_migrating %} 48 | Migrating 49 | {% endif %} 50 | {% endif %} 51 | 52 | 53 | {{ widgets.stat_icon('redis', node.host, node.port) }} 54 | {{ widgets.suppress_btn('redis', node.host, node.port, node.suppress_alert) }} 55 | {% if node.detail.slots_migrating %} 56 | 57 | {% endif %} 58 | 59 | 60 | -------------------------------------------------------------------------------- /templates/components/proxy/label.html: -------------------------------------------------------------------------------- 1 | {{ proxy.host|e }}:{{ proxy.port }} 2 | {% if proxy.detail.read_slave %} 3 | R 4 | {% endif %} 5 | {% if proxy.stat %} 6 | OK 7 | {% else %} 8 | E 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /templates/components/select.html: -------------------------------------------------------------------------------- 1 | {% extends 'components/grid-group.html' %} 2 | 3 | {% block content %} 4 | 12 | {% endblock %} 13 | -------------------------------------------------------------------------------- /templates/components/widgets.html: -------------------------------------------------------------------------------- 1 | {%- macro stat_icon(type, host, port, bar_before=False, bar_after=False) %} 2 | 3 | {%- if bar_before %}|{% endif %} 4 | {{ icon('line-chart') }} 5 | {%- if bar_after %}|{% endif %} 6 | 7 | {%- endmacro %} 8 | 9 | {%- macro suppress_btn(type, host, port, suppress) %} 10 | 13 | {%- endmacro %} 14 | -------------------------------------------------------------------------------- /templates/containerize/deploy/list_base.html: 
-------------------------------------------------------------------------------- 1 | {% extends './base.html' %} 2 | 3 | {% import './blocks/pager.html' as pager %} 4 | 5 | {% block head %} 6 | 7 | {% endblock %} 8 | 9 | {% block body %} 10 | 11 | {% block pagination %} 12 | 26 |
27 | {% endblock %} 28 | 29 | {% block table %}{% endblock %} 30 | 31 | {{ self.pagination() }} 32 | 33 | {% endblock %} 34 | -------------------------------------------------------------------------------- /templates/containerize/deploy/list_proxies.html: -------------------------------------------------------------------------------- 1 | {% extends './containerize/deploy/list_base.html' %} 2 | 3 | {% block title %}Containerized Proxy{% endblock %} 4 | 5 | {% block table %} 6 | {% autoescape false %} 7 | {{ render('containerize/deploy/proxy_table.html', proxies=proxies) }} 8 | {% endautoescape %} 9 | {% endblock %} 10 | -------------------------------------------------------------------------------- /templates/containerize/deploy/list_redis.html: -------------------------------------------------------------------------------- 1 | {% extends './containerize/deploy/list_base.html' %} 2 | 3 | {% block title %}Containerized Redis{% endblock %} 4 | 5 | {% block table %} 6 | {% autoescape false %} 7 | {{ render('containerize/deploy/redis_table.html', nodes=nodes) }} 8 | {% endautoescape %} 9 | {% endblock %} 10 | -------------------------------------------------------------------------------- /templates/containerize/deploy/no_pod.html: -------------------------------------------------------------------------------- 1 | {% extends './base.html' %} 2 | 3 | {% block title %}Containerization Unavailable{% endblock %} 4 | 5 | {% block body %} 6 |
7 |

No pod available

8 |

9 | Containerizing service runs at 10 | {{ g.container_client|e }}, 11 | but it does not provide any available pod; please check the service, and refresh this page. 12 |

13 |
14 | {% endblock %} 15 | -------------------------------------------------------------------------------- /templates/containerize/deploy/proxy_table.html: -------------------------------------------------------------------------------- 1 | {% extends './containerize/deploy/table_base.html' %} 2 | 3 | {%- block table_id %}proxy-table{% endblock %} 4 | 5 | {%- block table_head %} 6 | Container ID 7 | Image Version 8 | Host Address 9 | Creation 10 | Address 11 | Ops 12 | {% endblock %} 13 | 14 | {%- block table_body %} 15 | {% for n in proxies %} 16 | 17 | {{ n.eru_container_id[:16]|e }} 18 | {{ n.container_info.version|e }} 19 | {{ n.container_info.host|e }} 20 | {{ n.container_info.created|e }} 21 | {{ n.host|e }}:{{ n.port }} 22 | 23 | {{ button('Offline', color='danger', cls=['btn-del-container'], data={'type': 'proxy', 'cid': n.eru_container_id}, lcl='offline') }} 24 | 25 | 26 | {% endfor %} 27 | {% endblock %} 28 | -------------------------------------------------------------------------------- /templates/containerize/deploy/redis_table.html: -------------------------------------------------------------------------------- 1 | {% extends './containerize/deploy/table_base.html' %} 2 | 3 | {%- block table_id %}redis-table{% endblock %} 4 | 5 | {%- block table_head %} 6 | Container ID 7 | Image Version 8 | Host Address 9 | Creation 10 | Address 11 | Cluster mode 12 | Ops 13 | {% endblock %} 14 | 15 | {%- block table_body %} 16 | {% for n in nodes %} 17 | 18 | {{ n.eru_container_id[:16]|e }} 19 | {{ n.container_info.version|e }} 20 | {{ n.container_info.host|e }} 21 | {{ n.container_info.created|e }} 22 | {{ n.host|e }}:{{ n.port }} 23 | 24 | {% if n.detail %} 25 | {{ n.detail.cluster_enabled }} 26 | {% else %} 27 | - 28 | {% endif %} 29 | 30 | 31 | {% if n.assignee_id %} 32 | Serving 33 | {% else %} 34 | {{ button('Offline', color='danger', cls=['btn-del-container'], data={'type': 'node', 'cid': n.eru_container_id}, lcl='offline') }} 35 | {% endif %} 36 | 37 | 38 | 
{% endfor %} 39 | {% endblock %} 40 | -------------------------------------------------------------------------------- /templates/containerize/deploy/table_base.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | {%- block table_head %}{% endblock %} 5 | 6 | 7 | 8 | {%- block table_body %}{% endblock %} 9 | 10 |
11 | -------------------------------------------------------------------------------- /templates/containerize/image/manage_redis.html: -------------------------------------------------------------------------------- 1 | {% extends './base.html' %} 2 | 3 | {% block title %}Manage Redis Images{% endblock %} 4 | 5 | {% block body %} 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 |
Active Images
#NameCreationDescriptionOps
19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 |
Remote Images
NameCreationDescriptionAdd
32 | 33 | 155 | {% endblock %} 156 | -------------------------------------------------------------------------------- /templates/index.html: -------------------------------------------------------------------------------- 1 | {% extends './base.html' %} 2 | 3 | {% block title %}Redis Ctl{% endblock %} 4 | 5 | {% block head %} 6 | 11 | 12 | 13 | {% endblock %} 14 | 15 | {% block body %} 16 |
17 |
Clusters Info
18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | {% autoescape false %} 29 | {% for cluster in clusters %} 30 | {{ render('components/cluster/row.html', cluster=cluster) }} 31 | {% endfor %} 32 | {% endautoescape %} 33 | 34 |
#DescriptionRedisProxy
35 |
36 |
37 |
38 |
39 |
40 | Redis Info 41 |
42 |
43 | 48 |
49 |
50 |
51 | 52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | {% autoescape false %} 62 | {% for node in nodes %} 63 | {{ render('components/node/row.html', node=node, stats_enabled=stats_enabled) }} 64 | {% endfor %} 65 | {% endautoescape %} 66 | 67 |
AddressMemory UsageCluster
68 |
69 | 91 | 92 | {% endblock %} 93 | -------------------------------------------------------------------------------- /templates/myself/thirdparty.html: -------------------------------------------------------------------------------- 1 | {% extends './base.html' %} 2 | 3 | {% block title %}Thirdparty utilities{% endblock %} 4 | 5 | {% macro line(item, title, lcl) %} 6 |
7 |
8 |
{{ title }}
9 |
{{ item }}
10 |
11 |
12 | {% endmacro %} 13 | 14 | {% block body %} 15 |
16 | {{ line(app.container_client, 'containerize', 'Containerize') }} 17 | {{ line(app.stats_client, 'statistic', 'Statistic') }} 18 | {{ line(app.alarm_client, 'alarm', 'Alarm') }} 19 |
20 | {% endblock %} 21 | -------------------------------------------------------------------------------- /templates/pollings.html: -------------------------------------------------------------------------------- 1 | {% extends './base.html' %} 2 | 3 | {% block title %}Pollings{% endblock %} 4 | 5 | {% block body %} 6 |
7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | {% for p in pollings %} 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | {% endfor %} 29 | 30 |
#TimeHealthy RedisUnreachable RedisHealthy ProxyUnreachable Proxy
{{ p.id }}{{ p.polling_time|strftime }}{{ p.nodes_ok|length }}{{ p.nodes_fail|length }}{{ p.proxies_ok|length }}{{ p.proxies_fail|length }}
31 |
32 | 33 | {% block prune %} 34 |
35 | Prune 36 |
37 | {% endblock %} 38 | 39 | {% endblock %} 40 | -------------------------------------------------------------------------------- /templates/prune/audit.html: -------------------------------------------------------------------------------- 1 | {% extends './audit/nodes.html' %} 2 | 3 | {% block title %}Audit Prune{% endblock %} 4 | 5 | {% block head %} 6 | 7 | {% endblock %} 8 | 9 | {% block pagination %}{% endblock %} 10 | 11 | {% block body %} 12 | {% if first is not none %} 13 | {{ super() }} 14 |
15 | Click to prune audit logs before 16 | {{ datetime|strftime }} 17 | 18 |
19 | {% else %} 20 |

No records to prune

21 | {% endif %} 22 | {% endblock %} 23 | -------------------------------------------------------------------------------- /templates/prune/pollings.html: -------------------------------------------------------------------------------- 1 | {% extends './pollings.html' %} 2 | 3 | {% block title %}Pollings Prune{% endblock %} 4 | 5 | {% block head %} 6 | 7 | {% endblock %} 8 | 9 | {% block prune %}{% endblock %} 10 | 11 | {% block body %} 12 | {% if first is not none %} 13 | {{ super() }} 14 |
15 |
16 |
17 |
18 | Click to prune pollings before 19 | {{ datetime|strftime }} 20 |
21 |
22 |
23 | 24 |
25 | 26 |
27 |
28 | Back 29 |
30 |
31 |
32 |
33 |
34 | {% else %} 35 |

36 | No records to prune. 37 | Back 38 |

39 | {% endif %} 40 | {% endblock %} 41 | -------------------------------------------------------------------------------- /templates/prune/tasks.html: -------------------------------------------------------------------------------- 1 | {% extends './cluster/tasks_all.html' %} 2 | 3 | {% block title %}Prune Tasks{% endblock %} 4 | 5 | {% block head %} 6 | {{ super() }} 7 | 8 | {% endblock %} 9 | 10 | {% block pagination %}{% endblock %} 11 | 12 | {% block body %} 13 | {% if first is not none %} 14 | {{ super() }} 15 |
16 | Click to prune tasks before 17 | {{ datetime|strftime }} 18 | 19 |
20 | {% else %} 21 |

No records to prune

22 | {% endif %} 23 | {% endblock %} 24 | -------------------------------------------------------------------------------- /templates/redis/create.html: -------------------------------------------------------------------------------- 1 | {% extends './base.html' %} 2 | 3 | {% block title %}Register Redis{% endblock %} 4 | 5 | {% block body %} 6 |
7 |
Register Redis
8 |
9 |
10 | {{ label('Host', lcl='register-node-host') }} 11 | {{ input(size=6, id='add-nodes-host') }} 12 | {{ hint('IP or domain name', size=4, lcl='register-host-hint') }} 13 |
14 |
15 | {{ label('Port', lcl='register-node-port') }} 16 | {{ input(size=6, id='add-nodes-port') }} 17 | {{ hint('Port number or range, like 6379, 6000-6020', size=4, lcl='register-port-hint') }} 18 |
19 |
20 | {{ button('Register', size=2, offset=2, id='add-nodes', color='primary') }} 21 |
22 | 23 |
24 |
25 |
26 |
27 | 28 | 78 | {% endblock %} 79 | -------------------------------------------------------------------------------- /templates/redis/not_found.html: -------------------------------------------------------------------------------- 1 | {% extends './base.html' %} 2 | 3 | {% block title %}Redis Not Found{% endblock %} 4 | 5 | {% block body %} 6 |

Redis @ {{ host|e }} {{ port }} is not registered or has been removed. You may

7 | 11 | 12 | 22 | {% endblock %} 23 | -------------------------------------------------------------------------------- /templates/stats/proxy.html: -------------------------------------------------------------------------------- 1 | {% extends './stats/base.html' %} 2 | {% block title %}Proxy Status History{% endblock %} 3 | 4 | {% block graph %} 5 |
6 |

Connections

7 |

CPU

8 |

Memory Usage

9 |

Command Elapse

10 |
11 | 53 | {% endblock %} 54 | -------------------------------------------------------------------------------- /templates/stats/redis.html: -------------------------------------------------------------------------------- 1 | {% extends './stats/base.html' %} 2 | {% block title %}Redis Status History{% endblock %} 3 | 4 | {% block graph %} 5 |
6 |

CPU

7 |

Memory Usage

8 |

Connections

9 |

Storage

10 |
11 | 64 | {% endblock %} 65 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/projecteru/redis-ctl/16ae59b6dfe3d62ecb59951bd81395c370b005ef/test/__init__.py -------------------------------------------------------------------------------- /test/alarm.py: -------------------------------------------------------------------------------- 1 | import time 2 | 3 | import base 4 | import models.node as nm 5 | from models.base import commit_session 6 | from thirdparty import alarm 7 | from daemonutils.node_polling import NodeStatCollector 8 | 9 | 10 | class FakePoller(NodeStatCollector): 11 | def __init__(self, app): 12 | NodeStatCollector.__init__(self, app, 0) 13 | 14 | def poll_once(self): 15 | with self.app.app_context(): 16 | self._shot() 17 | 18 | 19 | class Containerize(base.TestCase): 20 | def reset_db(self): 21 | with self.app.app_context(): 22 | nm.RedisNode.query.delete() 23 | self.db.session.commit() 24 | 25 | def test_alarm(self): 26 | class TestAlarmClient(alarm.Base): 27 | def __init__(self): 28 | self.alarms = {} 29 | 30 | def send_alarm(self, endpoint, message, exception, **kwargs): 31 | self.alarms[(endpoint.host, endpoint.port)] = ( 32 | message, exception) 33 | self.app.replace_alarm_client(TestAlarmClient()) 34 | p = FakePoller(self.app) 35 | 36 | nm.create_instance('127.0.0.1', 29000) 37 | commit_session() 38 | self.app.write_polling_targets() 39 | p.poll_once() 40 | self.assertEqual(0, len(self.app.alarm_client.alarms)) 41 | 42 | n = nm.get_by_host_port('127.0.0.1', 29000) 43 | n.suppress_alert = False 44 | commit_session() 45 | self.app.write_polling_targets() 46 | p.poll_once() 47 | self.assertEqual(1, len(self.app.alarm_client.alarms)) 48 | 49 | def test_timed(self): 50 | CD = 5 51 | 52 | class TestTimedClient(alarm.Timed): 53 | def __init__(self): 54 | alarm.Timed.__init__(self, CD) 
55 | self.alarms = [] 56 | 57 | def do_send_alarm(self, endpoint, message, exception, **kwargs): 58 | self.alarms.append({ 59 | 'endpoint': endpoint, 60 | 'message': message, 61 | }) 62 | 63 | self.app.replace_alarm_client(TestTimedClient()) 64 | p = FakePoller(self.app) 65 | 66 | nm.create_instance('127.0.0.1', 29000) 67 | commit_session() 68 | self.app.write_polling_targets() 69 | p.poll_once() 70 | self.assertEqual(0, len(self.app.alarm_client.alarms)) 71 | 72 | n = nm.get_by_host_port('127.0.0.1', 29000) 73 | n.suppress_alert = False 74 | commit_session() 75 | self.app.write_polling_targets() 76 | p.poll_once() 77 | self.assertEqual(1, len(self.app.alarm_client.alarms)) 78 | 79 | p.poll_once() 80 | self.assertEqual(1, len(self.app.alarm_client.alarms)) 81 | 82 | time.sleep(CD + 1) 83 | p.poll_once() 84 | self.assertEqual(2, len(self.app.alarm_client.alarms)) 85 | -------------------------------------------------------------------------------- /test/base.py: -------------------------------------------------------------------------------- 1 | import os 2 | import errno 3 | import hashlib 4 | import tempfile 5 | import unittest 6 | 7 | import config 8 | import daemonutils.cluster_task 9 | import daemonutils.auto_balance 10 | import models.base 11 | from app import RedisCtl 12 | 13 | config.LOG_FILE = os.path.join(tempfile.gettempdir(), 'redisctlpytest') 14 | config.PERMDIR = os.path.join(tempfile.gettempdir(), 'redisctlpytestpermdir') 15 | config.POLL_INTERVAL = 0 16 | config.ERU_URL = None 17 | config.ERU_NETWORK = 'net' 18 | config.ERU_GROUP = 'group' 19 | unittest.TestCase.maxDiff = None 20 | 21 | try: 22 | os.makedirs(config.PERMDIR) 23 | except OSError as exc: 24 | if exc.errno == errno.EEXIST and os.path.isdir(config.PERMDIR): 25 | pass 26 | 27 | 28 | class TestApp(RedisCtl): 29 | def __init__(self): 30 | RedisCtl.__init__(self, config) 31 | 32 | def db_uri(self, config): 33 | try: 34 | return config.TEST_SQLALCHEMY_DATABASE_URI 35 | except AttributeError: 
36 | raise ValueError('TEST_SQLALCHEMY_DATABASE_URI should be' 37 | ' specified in override_config for unittest') 38 | 39 | def replace_container_client(self, client=None): 40 | if client is None: 41 | client = FakeContainerClientBase() 42 | self.container_client = client 43 | return client 44 | 45 | def replace_alarm_client(self, client=None): 46 | self.alarm_client = client 47 | 48 | def init_stats_client(self, config): 49 | return None 50 | 51 | def init_alarm_client(self, config): 52 | return None 53 | 54 | def init_container_client(self, config): 55 | return None 56 | 57 | 58 | class TestCase(unittest.TestCase): 59 | def __init__(self, *args, **kwargs): 60 | unittest.TestCase.__init__(self, *args, **kwargs) 61 | self.app = TestApp() 62 | self.app.register_blueprints() 63 | self.db = models.base.db 64 | 65 | def reset_db(self): 66 | with self.app.app_context(): 67 | models.base.db.session.close() 68 | models.base.db.drop_all() 69 | models.base.db.create_all() 70 | 71 | def setUp(self): 72 | print '' 73 | print '[- Setup -]', self._testMethodName 74 | self.reset_db() 75 | self.app.write_polling_targets() 76 | print '[+ Start +]', self._testMethodName 77 | 78 | def tearDown(self): 79 | print '[[ Done ]]', self._testMethodName 80 | 81 | def replace_eru_client(self, client=None): 82 | return self.app.replace_container_client(client) 83 | 84 | def run(self, result=None): 85 | if not (result and (result.failures or result.errors)): 86 | unittest.TestCase.run(self, result) 87 | 88 | def exec_all_tasks(self, trials=20000): 89 | while trials > 0: 90 | trials -= 1 91 | 92 | tasks = list(models.task.undone_tasks()) 93 | if len(tasks) == 0: 94 | return 95 | 96 | t = daemonutils.cluster_task.try_create_exec_thread_by_task( 97 | tasks[0], self.app) 98 | self.assertIsNotNone(t) 99 | t.run() 100 | raise AssertionError('Pending tasks not finished') 101 | 102 | def assertReqStatus(self, status_code, r): 103 | if status_code != r.status_code: 104 | raise 
AssertionError('\n'.join([ 105 | 'Response status code not same:', 106 | ' expected: %d' % status_code, 107 | ' actual: %d' % r.status_code, 108 | ' response data: %s' % r.data, 109 | ])) 110 | 111 | 112 | class FakeContainerClientBase(object): 113 | def __init__(self): 114 | self.next_container_id = 0 115 | self.deployed = {} 116 | 117 | def deploy_with_network(self, what, pod, entrypoint, ncore=1, host=None, 118 | args=None): 119 | network = {'id': 'network:%s' % what} 120 | version_sha = hashlib.sha1(what).hexdigest() 121 | r = self.deploy_private( 122 | 'group', pod, what, ncore, 1, version_sha, 123 | entrypoint, 'prod', [network['id']], host_name=host, args=args) 124 | task_id = r['tasks'][0] 125 | 126 | cid = -task_id 127 | container_info = { 128 | 'networks': [{'address': '10.0.0.%d' % cid}], 129 | 'host': '172.10.0.%d' % cid, 130 | 'created': '2000-01-01 07:00:00', 131 | } 132 | addr = container_info['networks'][0]['address'] 133 | created = container_info['created'] 134 | return { 135 | 'version': version_sha, 136 | 'container_id': cid, 137 | 'address': addr, 138 | 'host': host, 139 | 'created': created, 140 | } 141 | 142 | def deploy_redis(self, pod, aof, netmode, cluster=True, host=None, 143 | port=6379, *args, **kwarge): 144 | return self.deploy_with_network('redis', pod, netmode, host=host, 145 | args=[]) 146 | 147 | def deploy_proxy(self, pod, threads, read_slave, netmode, host=None, 148 | port=8889, *args, **kwarge): 149 | return self.deploy_with_network( 150 | 'cerberus', pod, netmode, ncore=threads, host=host, args=[]) 151 | 152 | def rm_containers(self, container_ids): 153 | for i in container_ids: 154 | del self.deployed[i] 155 | 156 | def revive_container(self, container_id): 157 | pass 158 | 159 | def deploy_private(self, group, pod, what, ncont, ncore, version_sha, 160 | entrypoint, env, network, host_name=None, args=None): 161 | self.next_container_id += 1 162 | self.deployed[self.next_container_id] = { 163 | 'group': group, 164 | 'pod': pod, 
class ClusterTest(base.TestCase):
    def test_create_delete_cluster(self):
        """Full lifecycle: register a node, launch a cluster on it, shut
        it down, then delete it once its task history is purged."""
        # Register a redis node and check it shows up as a polling target.
        with self.app.test_client() as client:
            resp = client.post('/redis/add', data={
                'host': '127.0.0.1',
                'port': '7100',
            })
            self.assertReqStatus(200, resp)
            expected_targets = {
                'nodes': [{
                    'host': '127.0.0.1',
                    'port': 7100,
                    'suppress_alert': 1,
                }],
                'proxies': [],
            }
            self.assertEqual(expected_targets, self.app.polling_targets())

        # Create a cluster and launch it on the registered node.
        with self.app.test_client() as client:
            resp = client.post('/cluster/add', data={
                'descr': 'the-quick-brown-fox',
            })
            self.assertReqStatus(200, resp)
            cluster_id = int(resp.data)

            launch_payload = json.dumps({
                'cluster': cluster_id,
                'nodes': [{
                    'host': '127.0.0.1',
                    'port': 7100,
                }],
            })
            resp = client.post('/task/launch', data=launch_payload)
            self.assertReqStatus(200, resp)
            self.exec_all_tasks()

        # Shut the cluster down; this leaves one recorded cluster task.
        with self.app.test_client() as client:
            resp = client.post('/cluster/shutdown', data={
                'cluster_id': cluster_id,
            })
            self.assertReqStatus(200, resp)
            self.exec_all_tasks()

        tasks = models.task.ClusterTask.query.all()
        self.assertEqual(1, len(tasks))
        self.assertEqual(cluster_id, tasks[0].cluster_id)

        # Deletion is refused while the cluster still has task history.
        with self.app.test_client() as client:
            resp = client.post('/cluster/delete', data={
                'id': cluster_id,
            })
            self.assertReqStatus(400, resp)

        # Purge the task records; deletion should then succeed.
        models.task.TaskStep.query.filter_by(task_id=tasks[0].id).delete()
        models.task.ClusterTask.query.delete()
        commit_session()

        with self.app.test_client() as client:
            resp = client.post('/cluster/delete', data={
                'id': cluster_id,
            })
            self.assertReqStatus(200, resp)
class Containerize(base.TestCase):
    """Tests for the /containerize blueprint against a fake Eru client."""

    def __init__(self, *args, **kwargs):
        base.TestCase.__init__(self, *args, **kwargs)
        # The shared test app does not register the containerize
        # blueprint by default; add it so its routes resolve.
        self.app.register_blueprint(bp)

    def reset_db(self):
        # Only redis nodes are created by these tests, so clearing that
        # one table is sufficient between runs.
        with self.app.app_context():
            models.node.RedisNode.query.delete()
            self.db.session.commit()

    def test_port_offset(self):
        # The node persisted in the database must carry the port the
        # container client actually allocated, not the port requested.
        class ClientOffset(base.FakeContainerClientBase):
            # Fake client that deploys on `requested port + offset`,
            # mimicking a scheduler that remaps ports.
            def __init__(self, offset):
                base.FakeContainerClientBase.__init__(self)
                self.offset = offset

            def deploy_redis(self, pod, aof, netmode, cluster=True, host=None,
                             port=6379, *args, **kwarge):
                port = port + self.offset
                r = base.FakeContainerClientBase.deploy_redis(
                    self, pod, aof, netmode, cluster=cluster, host=host,
                    port=port, *args, **kwarge)
                # Report the remapped port back like the real client would.
                r['port'] = port
                return r

            def deploy_proxy(self, pod, threads, read_slave, netmode, host=None,
                             port=8889, *args, **kwarge):
                port = port + self.offset
                r = base.FakeContainerClientBase.deploy_proxy(
                    self, pod, threads, read_slave, netmode, host=host,
                    port=port, *args, **kwarge)
                r['port'] = port
                return r

        self.replace_eru_client(ClientOffset(12))
        with self.app.test_client() as client:
            r = client.post('/containerize/create_redis', data={
                'port': 6500,
                'pod': 'pod',
                'aof': 'y',
                'netmode': 'vlan',
                'cluster': 'n',
            })
            self.assertReqStatus(200, r)
            # Requested 6500 + offset 12 => 6512 reported back; version is
            # the fake client's stable sha1 of 'redis'.
            self.assertEqual({
                'address': '10.0.0.1',
                'container_id': 1,
                'created': '2000-01-01 07:00:00',
                'host': None,
                'port': 6512,
                'version': 'b840fc02d524045429941cc15f59e41cb7be6c52',
            }, json.loads(r.data))

        n = models.node.list_all_nodes()
        self.assertEqual(1, len(n))
        n0 = n[0]
        # The persisted node also uses the offset port.
        self.assertEqual(6512, n0.port)
class InstanceManagement(base.TestCase):
    def test_request_instance(self):
        """Created instances are all listed back, unassigned and intact."""
        endpoints = [
            ('10.1.201.10', 9000),
            ('10.1.201.10', 9001),
            ('10.1.201.12', 6376),
        ]
        for host, port in endpoints:
            nm.create_instance(host, port)

        nodes = sorted(nm.list_all_nodes(), key=lambda x: (x.host, x.port))
        self.assertEqual(len(endpoints), len(nodes))
        # Each node keeps its endpoint and starts with no assignee.
        for (host, port), node in zip(endpoints, nodes):
            self.assertEqual((host, port, None),
                             (node.host, node.port, node.assignee))
class Task(base.TestCase):
    """End-to-end execution of cluster tasks against live redis instances
    at 127.0.0.1:7100 / 7101 (provided by the test harness)."""

    def test_execution(self):
        # A join step followed by a migrate step should both complete,
        # leaving node 7101 owning slots 0-1.
        with self.app.test_client() as client:
            r = client.post('/redis/add', data={
                'host': '127.0.0.1',
                'port': '7100',
            })
            self.assertReqStatus(200, r)
            r = client.post('/cluster/add', data={
                'descr': 'lazy dog',
            })
            self.assertReqStatus(200, r)
            # Raw response body; reused as-is in the launch payload below.
            cluster_id = r.data

            r = client.post('/task/launch', data=json.dumps({
                'cluster': cluster_id,
                'nodes': [{
                    'host': '127.0.0.1',
                    'port': 7100,
                }],
            }))
            self.assertReqStatus(200, r)
            self.exec_all_tasks()

            # Queue a second task by hand: join 7101, then move slots 0-1.
            task = ClusterTask(cluster_id=int(cluster_id), task_type=0)
            task.add_step(
                'join', cluster_id=cluster_id, cluster_host='127.0.0.1',
                cluster_port=7100, newin_host='127.0.0.1', newin_port=7101)
            task.add_step(
                'migrate', src_host='127.0.0.1', src_port=7100,
                dst_host='127.0.0.1', dst_port=7101, slots=[0, 1])
            self.db.session.add(task)
            self.db.session.commit()

            self.exec_all_tasks()

            nodes, node_7100 = comm.list_nodes('127.0.0.1', 7100)
            self.assertEqual(2, len(nodes))
            # 7100 keeps every slot except the two that migrated away.
            self.assertEqual(range(2, 16384), sorted(node_7100.assigned_slots))

            tasks = list(self.db.session.query(ClusterTask).order_by(
                ClusterTask.id.asc()).all())
            self.assertEqual(2, len(tasks))
            t = tasks[0]
            self.assertIsNotNone(t.completion)
            self.assertEqual(TASK_TYPE_LAUNCH, t.task_type)

            t = tasks[1]
            self.assertIsNotNone(t.completion)
            self.assertIsNone(t.exec_error)
            # The task lock must have been released on completion.
            self.assertIsNone(t.acquired_lock())

            # Tear the cluster back down for the next test.
            comm.quit_cluster('127.0.0.1', 7101)
            comm.shutdown_cluster('127.0.0.1', 7100)

    def test_execution_failed(self):
        # Same setup, but with a duplicate migrate step: migrating slots
        # [0, 1] a second time must fail, recording the error on that
        # step and leaving the following step unstarted.
        with self.app.test_client() as client:
            r = client.post('/redis/add', data={
                'host': '127.0.0.1',
                'port': '7100',
            })
            self.assertReqStatus(200, r)
            r = client.post('/cluster/add', data={
                'descr': 'lazy dog',
            })
            self.assertReqStatus(200, r)
            cluster_id = r.data

            r = client.post('/task/launch', data=json.dumps({
                'cluster': cluster_id,
                'nodes': [{
                    'host': '127.0.0.1',
                    'port': 7100,
                }],
            }))
            self.assertReqStatus(200, r)
            self.exec_all_tasks()

            task = ClusterTask(cluster_id=int(cluster_id), task_type=0)
            task.add_step(
                'join', cluster_id=cluster_id, cluster_host='127.0.0.1',
                cluster_port=7100, newin_host='127.0.0.1', newin_port=7101)
            task.add_step(
                'migrate', src_host='127.0.0.1', src_port=7100,
                dst_host='127.0.0.1', dst_port=7101, slots=[0, 1])
            # Duplicate of the previous step; expected to fail.
            task.add_step(
                'migrate', src_host='127.0.0.1', src_port=7100,
                dst_host='127.0.0.1', dst_port=7101, slots=[0, 1])
            # Should never run because the task aborts above.
            task.add_step(
                'migrate', src_host='127.0.0.1', src_port=7100,
                dst_host='127.0.0.1', dst_port=7101, slots=[2, 3])
            self.db.session.add(task)
            self.db.session.commit()

            self.exec_all_tasks()

            nodes, node_7100 = comm.list_nodes('127.0.0.1', 7100)
            self.assertEqual(2, len(nodes))
            self.assertEqual(range(2, 16384), sorted(node_7100.assigned_slots))

            tasks = list(self.db.session.query(ClusterTask).order_by(
                ClusterTask.id.asc()).all())
            self.assertEqual(2, len(tasks))
            t = tasks[1]
            self.assertIsNotNone(t.completion)
            # The task as a whole is marked failed, lock released.
            self.assertIsNotNone(t.exec_error)
            self.assertIsNone(t.acquired_lock())

            steps = t.all_steps
            self.assertEqual(4, len(steps))
            # Join and the first migrate succeeded...
            step = steps[0]
            self.assertTrue(step.completed)
            self.assertIsNone(step.exec_error)
            step = steps[1]
            self.assertTrue(step.completed)
            self.assertIsNone(step.exec_error)
            # ...the duplicate migrate completed with an error recorded...
            step = steps[2]
            self.assertTrue(step.completed)
            self.assertIsNotNone(step.exec_error)
            # ...and the final step never ran.
            step = steps[3]
            self.assertFalse(step.started)
            self.assertFalse(step.completed)
            self.assertIsNone(step.exec_error)

            comm.quit_cluster('127.0.0.1', 7101)
            comm.shutdown_cluster('127.0.0.1', 7100)
class Translation(base.TestCase):
    def test_json_format(self):
        """Every *.json file under static/trans must parse as valid JSON."""
        base_dir = os.path.join('static', 'trans')
        files = [os.path.join(base_dir, r) for r in os.listdir(base_dir)
                 if r.endswith('.json')]
        for f in files:
            with open(f, 'r') as fin:
                try:
                    json.loads(fin.read())
                except ValueError:
                    # Catch only parse failures (ValueError also covers
                    # json.JSONDecodeError on Python 3); name the broken
                    # file before re-raising so the test report shows it.
                    print('Failed to parse %s' % f)
                    raise

    def reset_db(self):
        # Pure filesystem test: no database fixture to reset.
        pass
class Base(object):
    """Interface for alarm back-ends used by the polling daemon."""

    def __str__(self):
        return 'Unimplemented Alarm Service'

    def on_loop_begin(self):
        """Hook called at the start of each polling loop; default no-op."""
        pass

    def send_alarm(self, endpoint, message, exception, **kwargs):
        raise NotImplementedError()


class Timed(Base):
    """Alarm service that rate-limits alarms per endpoint.

    Once an alarm fires for host:port, further alarms for that endpoint
    are suppressed until `cool_down_sec` seconds have elapsed; expiry is
    evaluated at the start of each polling loop.
    """

    def __init__(self, cool_down_sec):
        self.cool_down_sec = cool_down_sec
        # Maps 'host:port' -> datetime of the last alarm sent.
        self._alarmed = {}

    def __str__(self):
        return 'Timed Alarm Service'

    def on_loop_begin(self):
        """Drop endpoints whose cool-down period has elapsed.

        Uses total_seconds(): timedelta.seconds wraps at one day, which
        previously kept an endpoint suppressed indefinitely once more
        than 24 hours had passed since its alarm.
        """
        now = datetime.now()
        expired = [ep for ep, when in self._alarmed.items()
                   if self.cool_down_sec < (now - when).total_seconds()]
        for ep in expired:
            del self._alarmed[ep]

    def do_send_alarm(self, endpoint, message, exception, **kwargs):
        # Default back-end: just hand the alarm to the logging service.
        logging.error("[%s]%s", endpoint, message)

    def send_alarm(self, endpoint, *args, **kwargs):
        ep = '%s:%d' % (endpoint.host, endpoint.port)
        if ep in self._alarmed:
            return  # still cooling down; suppress
        self._alarmed[ep] = datetime.now()
        self.do_send_alarm(endpoint, *args, **kwargs)


class HttpAlarm(Timed):
    """Timed alarm service that POSTs each alarm to an HTTP hook."""

    def __init__(self, url, cool_down_sec=300):
        Timed.__init__(self, cool_down_sec)
        self.url = url

    def __str__(self):
        return 'Http Alarm Service'

    def do_send_alarm(self, endpoint, message, exception, **kwargs):
        # Best effort: an unreachable hook must not kill the poll loop.
        try:
            params = {}
            params['endpoint_host'] = endpoint.host
            params['endpoint_port'] = endpoint.port
            params['msg'] = message
            requests.post(self.url, data=params, timeout=3)
        except Exception as e:
            # Log str(e), not e.message: the .message attribute was
            # removed in Python 3 and deprecated since Python 2.6.
            logging.error('%s', e)
class ContainerizeExceptionBase(Exception):
    """Root of errors raised by containerize service implementations."""
    pass


class Base(object):
    """Interface for containerize (Eru-like) back-ends.

    Concrete subclasses implement the raw `deploy`/container operations;
    this base translates redis / cerberus deployment options into the
    command-line arguments handed to `deploy`.
    """

    __abstract__ = True

    def __init__(self, config):
        # Memory cap applied to micro-plan redis deployments.
        self.micro_plan_mem = config.MICRO_PLAN_MEM

    def __str__(self):
        return 'Unimplemented Containerize Service'

    def cpu_slice(self):
        # How many micro-plan containers share a single core.
        return 1

    def cpu_slice_factor(self):
        # Fraction of a core allotted to one micro-plan container.
        return 1 / float(self.cpu_slice())

    def list_redis_images(self, offset, limit):
        return []

    def lastest_image(self, what):  # sic: name kept for existing callers
        raise NotImplementedError()

    def deploy(self, what, pod, entrypoint, ncore, host, port, args,
               image=None):
        raise NotImplementedError()

    def get_container(self, container_id):
        raise NotImplementedError()

    def deploy_redis(self, pod, aof, netmode, cluster=True, host=None,
                     port=6379, image=None, micro_plan=False, **kwargs):
        """Build redis-server arguments and hand off to deploy()."""
        opts = ['--port', str(port)]
        cores = 1
        if aof:
            opts += ['--appendonly', 'yes']
        if cluster:
            opts += ['--cluster-enabled', 'yes']
        if micro_plan:
            # Micro plan: cap memory and share a core slice.
            opts += ['--maxmemory', str(self.micro_plan_mem)]
            cores = self.cpu_slice_factor()
        return self.deploy('redis', pod, netmode, cores, host, port, opts,
                           image=image)

    def deploy_proxy(self, pod, threads, read_slave, netmode, host=None,
                     port=8889, micro_plan_cpu_slice=None, **kwargs):
        """Build cerberus proxy arguments and hand off to deploy()."""
        cores = threads
        if micro_plan_cpu_slice is not None:
            cores = micro_plan_cpu_slice * self.cpu_slice_factor()
        opts = ['-b', str(port), '-t', str(threads)]
        if read_slave:
            opts += ['-r', 'yes']
        return self.deploy('cerberus', pod, netmode, cores, host, port, opts)

    def rm_containers(self, container_ids):
        raise NotImplementedError()

    def revive_container(self, container_id):
        raise NotImplementedError()

    def list_pods(self):
        raise NotImplementedError()

    def list_pod_hosts(self, pod):
        raise NotImplementedError()
class Client(Base):
    """OpenFalcon statistic back-end.

    Pushes metric points over a persistent TCP, line-delimited JSON-RPC
    connection ('Transfer.Update') and reads history back through the
    HTTP graph API.
    """

    def __init__(self, host_query, host_write, port_query, port_write, db,
                 interval=30):
        # HTTP endpoint used by query_field.
        self.query_uri = urlparse.urlunparse(urlparse.ParseResult(
            'http', '%s:%d' % (host_query, port_query), 'graph/history',
            None, None, None))
        # `db` is used as the OpenFalcon "endpoint" value on every point.
        self.prefix = db
        self.write_addr = (host_write, port_write)
        self.interval = interval

        self.socket = None
        self.stream = None
        self.id_counter = None
        self.buf_size = None
        self.reconnect()

    def __str__(self):
        return 'OpenFalcon write@<%s> query@<%s>' % (
            self.write_addr, self.query_uri)

    def reconnect(self):
        # Re-establish the write connection (dropping any previous one)
        # and reset the RPC id counter and write batch size.
        self.close()
        self.socket = socket.create_connection(self.write_addr)
        self.stream = self.socket.makefile()
        self.id_counter = itertools.count()
        self.buf_size = 1 << 16

    def close(self):
        # Safe to call before the first connect: socket is still None.
        if self.socket is None:
            return
        self.socket.close()
        self.stream.close()

    def __del__(self):
        self.close()

    def write_points(self, name, fields):
        """Send one GAUGE point per (metric, value) in `fields`, tagged
        with the node address `name`.

        On IOError: log, reconnect, and re-raise so the caller can retry
        over the fresh connection.
        """
        now = int(time.time())
        try:
            self._write([{
                'metric': metric,
                'endpoint': self.prefix,
                'timestamp': now,
                'step': self.interval,
                'value': val,
                'counterType': 'GAUGE',
                'tags': 'service=redisctl,addr=' + name,
            } for metric, val in fields.iteritems()])
        except IOError as e:
            logging.error('Fail to write points for %s as %s', name, e.message)
            self.reconnect()
            raise

    def _write(self, lines):
        # Send points in batches of at most buf_size items to bound each
        # RPC payload; returns the list of per-batch RPC results.
        s = 0
        resp = []
        while True:
            buf = lines[s: s + self.buf_size]
            s = s + self.buf_size
            if len(buf) == 0:
                break
            r = self._rpc('Transfer.Update', buf)
            resp.append(r)
        return resp

    def _rpc(self, name, *params):
        # Line-delimited JSON-RPC over the persistent socket: write one
        # JSON request, then read exactly one JSON response line.
        request = {
            'id': next(self.id_counter),
            'params': list(params),
            'method': name,
        }
        payload = json.dumps(request)
        self.socket.sendall(payload)
        response = self.stream.readline()
        if not response:
            # Peer hung up; surface as IOError so write_points reconnects.
            raise IOError('empty response')
        response = json.loads(response.decode('utf8'))
        if response.get('error') is not None:
            raise IOError(response.get('error'))
        return response.get('result')

    def query_field(self, name, field, aggf, span, end, interval):
        """Query one metric series over [end - span, end] with aggregation
        `aggf`; drops null values and stride-samples the series down to at
        most POINT_LIMIT points. (`interval` is accepted but unused here.)
        """
        r = requests.post(self.query_uri, data=json.dumps({
            'start': end - span,
            'end': end,
            'cf': aggf,
            'endpoint_counters': [{
                'endpoint': self.prefix,
                'counter': '%s/addr=%s,service=redisctl' % (field, name),
            }],
        })).json()[0]['Values']
        if r is None:
            return []
        if len(r) > POINT_LIMIT:
            # Keep every k-th point so the result stays under the limit.
            r = r[::len(r) / POINT_LIMIT + 1]
        return [[x['timestamp'], x['value']]
                for x in r if x['value'] is not None]

    def query(self, name, fields, span, end, interval):
        """Query several fields at once; `fields` maps each metric name to
        its aggregation function. Returns {metric: [[ts, value], ...]}."""
        result = {}
        for f, a in fields.iteritems():
            result[f] = self.query_field(name, f, a, span, end, interval)
        return result
class Base(object):
    """Abstract statistic back-end: write metric points, query history."""

    def __str__(self):
        return 'Unimplemented Statistic Service'

    def write_points(self, name, fields):
        """Persist `fields` (metric -> value) for the series `name`."""
        raise NotImplementedError()

    def query(self, name, fields, span, end, interval):
        """Fetch aggregated history for `fields` of the series `name`."""
        raise NotImplementedError()