├── ztq_console
│   ├── __init__.py
│   ├── CHANGES.txt
│   ├── ztq_console
│   │   ├── utils
│   │   │   ├── __init__.py
│   │   │   ├── security.py
│   │   │   ├── models.py
│   │   │   ├── password.py
│   │   │   ├── dispatch.py
│   │   │   └── get_fts_data.py
│   │   ├── static
│   │   │   ├── images
│   │   │   │   ├── ok.gif
│   │   │   │   ├── up.gif
│   │   │   │   ├── alert.gif
│   │   │   │   ├── close.gif
│   │   │   │   ├── down.gif
│   │   │   │   ├── open.gif
│   │   │   │   └── header-bg.jpg
│   │   │   ├── script.js
│   │   │   ├── style_login.css
│   │   │   └── style.css
│   │   ├── tests.py
│   │   ├── templates
│   │   │   ├── password.pt
│   │   │   ├── mainpage.html
│   │   │   ├── login.pt
│   │   │   ├── menu.html
│   │   │   ├── top.html
│   │   │   ├── syslog.html
│   │   │   ├── jobs.html
│   │   │   ├── main.html
│   │   │   ├── workerlog.html
│   │   │   ├── queues.html
│   │   │   ├── errorlog.html
│   │   │   └── worker.html
│   │   ├── __init__.py
│   │   └── views.py
│   ├── MANIFEST.in
│   ├── buildout.cfg
│   ├── setup.cfg
│   ├── LICENSE
│   ├── app.ini
│   ├── setup.py
│   ├── bootstrap.py
│   ├── README.txt
│   └── README.md
├── ztq_demo
│   ├── __init__.py
│   ├── test_queue.py
│   ├── test_batch.py
│   ├── worker.ini
│   ├── test_callback.py
│   ├── test_ping.py
│   ├── test_transaction.py
│   ├── test_fcallback.py
│   ├── tasks.py
│   └── test5.py
├── ztq_core
│   ├── test
│   │   ├── __init__.py
│   │   ├── test_list.py
│   │   ├── test.py
│   │   └── test_redis_wrap.py
│   ├── MANIFEST.in
│   ├── CHANGES.txt
│   ├── ztq_core
│   │   ├── utils.py
│   │   ├── __init__.py
│   │   ├── demo.py
│   │   ├── cron.py
│   │   ├── async.py
│   │   ├── model.py
│   │   ├── task.py
│   │   └── redis_wrap.py
│   ├── LICENSE
│   ├── setup.py
│   ├── README.txt
│   └── README.md
├── ztq_worker
│   ├── CHANGES.txt
│   ├── ztq_worker
│   │   ├── system_info
│   │   │   ├── README.txt
│   │   │   ├── get_cpu_style.vbs
│   │   │   ├── get_cpu_usage.vbs
│   │   │   ├── __init__.py
│   │   │   ├── get_mem_usage.vbs
│   │   │   ├── win.py
│   │   │   └── linux.py
│   │   ├── __init__.py
│   │   ├── job_thread_manager.py
│   │   ├── config_manager.py
│   │   ├── buffer_thread.py
│   │   ├── command_thread.py
│   │   ├── main.py
│   │   ├── command_execute.py
│   │   └── job_thread.py
│   ├── MANIFEST.in
│   ├── worker.ini
│   ├── LICENSE
│   ├── setup.py
│   ├── README.txt
│   └── README.md
├── about-ztq.pptx
├── .gitignore
├── LICENSE
└── README.md
/ztq_console/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ztq_demo/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ztq_core/test/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/ztq_core/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include *.txt
2 |
--------------------------------------------------------------------------------
/ztq_core/CHANGES.txt:
--------------------------------------------------------------------------------
1 | 1.0dev
2 | ---
3 |
4 | - Initial version
5 |
--------------------------------------------------------------------------------
/ztq_console/CHANGES.txt:
--------------------------------------------------------------------------------
1 | 1.0dev
2 | ---
3 |
4 | - Initial version
5 |
--------------------------------------------------------------------------------
/ztq_worker/CHANGES.txt:
--------------------------------------------------------------------------------
1 | 1.0dev
2 | ---
3 |
4 | - Initial version
5 |
--------------------------------------------------------------------------------
/about-ztq.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easydo-cn/ztq/HEAD/about-ztq.pptx
--------------------------------------------------------------------------------
/ztq_worker/ztq_worker/system_info/README.txt:
--------------------------------------------------------------------------------
1 | This package obtains basic system information, such as CPU usage, memory usage, and the local IP address.
2 |
3 |
--------------------------------------------------------------------------------
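A minimal usage sketch for this package, based on the functions exported by system_info/__init__.py and the __main__ blocks of win.py and linux.py (the printed values are illustrative):

    from ztq_worker.system_info import get_cpu_style, get_cpu_usage, get_mem_usage, get_ip

    print 'cpu style: %s' % get_cpu_style()    # CPU model string
    print 'cpu usage: %s' % get_cpu_usage()    # e.g. '12.5%'
    mem_percent, mem_total = get_mem_usage()   # e.g. ('43.1%', '7986M')
    print 'local ip: %s' % get_ip()
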
/ztq_console/ztq_console/utils/__init__.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 | from dispatch import *
3 | from get_fts_data import *
4 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/static/images/ok.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easydo-cn/ztq/HEAD/ztq_console/ztq_console/static/images/ok.gif
--------------------------------------------------------------------------------
/ztq_console/ztq_console/static/images/up.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easydo-cn/ztq/HEAD/ztq_console/ztq_console/static/images/up.gif
--------------------------------------------------------------------------------
/ztq_worker/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include *.txt
2 | include *.md
3 | include ztq_worker/system_info/*.py
4 | include ztq_worker/system_info/*.vbs
5 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/static/images/alert.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easydo-cn/ztq/HEAD/ztq_console/ztq_console/static/images/alert.gif
--------------------------------------------------------------------------------
/ztq_console/ztq_console/static/images/close.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easydo-cn/ztq/HEAD/ztq_console/ztq_console/static/images/close.gif
--------------------------------------------------------------------------------
/ztq_console/ztq_console/static/images/down.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easydo-cn/ztq/HEAD/ztq_console/ztq_console/static/images/down.gif
--------------------------------------------------------------------------------
/ztq_console/ztq_console/static/images/open.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easydo-cn/ztq/HEAD/ztq_console/ztq_console/static/images/open.gif
--------------------------------------------------------------------------------
/ztq_console/ztq_console/static/images/header-bg.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easydo-cn/ztq/HEAD/ztq_console/ztq_console/static/images/header-bg.jpg
--------------------------------------------------------------------------------
/ztq_worker/ztq_worker/system_info/get_cpu_style.vbs:
--------------------------------------------------------------------------------
1 | Set objProc = GetObject("winmgmts:\\.\root\cimv2:win32_processor='cpu0'")
2 | Wscript.Echo objProc.Name ' Print the CPU model
3 |
--------------------------------------------------------------------------------
/ztq_worker/ztq_worker/system_info/get_cpu_usage.vbs:
--------------------------------------------------------------------------------
1 | Set objProc = GetObject("winmgmts:\\.\root\cimv2:win32_processor='cpu0'")
2 | Wscript.Echo Round(objProc.LoadPercentage , 2) ' Print the CPU load percentage
3 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/utils/security.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 | USERS = {'admin':'admin', 'viewer':'viewer'}
3 | GROUPS = {'admin':['group:editors']}
4 |
5 | def groupfinder(userid, request):
6 | if userid in USERS:
7 | return GROUPS.get(userid, [])
8 |
--------------------------------------------------------------------------------
/ztq_console/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include *.txt
2 | include app.ini
3 | include ztq_console/utils/*.py
4 | recursive-include ztq_console/templates *.html *.pt
5 | recursive-include ztq_console/static *.js *.css
6 | recursive-include ztq_console/static/images *.gif *.jpg
7 |
--------------------------------------------------------------------------------
/ztq_worker/ztq_worker/__init__.py:
--------------------------------------------------------------------------------
1 |
2 | from command_thread import CommandThread
3 | from job_thread import report_progress, report_job
4 | from config_manager import register_batch_queue
5 | from command_execute import start_buffer_thread, init_job_threads
6 |
7 |
--------------------------------------------------------------------------------
/ztq_worker/ztq_worker/system_info/__init__.py:
--------------------------------------------------------------------------------
1 | # -*- encoding: utf-8 -*-
2 | import sys
3 |
4 | if sys.platform.startswith('win'):
5 | from win import get_cpu_style, get_cpu_usage, get_mem_usage, get_ip
6 | else:
7 | from linux import get_cpu_style, get_cpu_usage, get_mem_usage, get_ip
8 |
--------------------------------------------------------------------------------
/ztq_demo/test_queue.py:
--------------------------------------------------------------------------------
1 | import time
2 | import ztq_core
3 | from ztq_demo.tasks import send
4 |
5 | ztq_core.setup_redis('default','localhost', 6379, 3)
6 |
7 | send('hello, world 1')
8 | send('hello, world 2')
9 | send('hello, world 3')
10 |
11 | send('hello, world 4', ztq_queue='mail')
12 |
--------------------------------------------------------------------------------
/ztq_worker/worker.ini:
--------------------------------------------------------------------------------
1 | [server]
2 | host = localhost
3 | port = 6379
4 | db = 0
5 | alias = w01
6 | active_config = false
7 | modules = ztq_core.demo
8 |
9 | [queues]
10 | default = 0
11 | mail = 0
12 |
13 | [log]
14 | key = ztq_worker
15 | handler_file = ./ztq_worker.log
16 | level = ERROR
17 |
18 |
--------------------------------------------------------------------------------
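A rough sketch of loading this file programmatically with the helper in ztq_worker/config_manager.py (the path is an assumption; the worker normally reads its configuration at startup):

    from ztq_worker.config_manager import read_config_file

    config = read_config_file('worker.ini')  # assumed path to this file
    print config['server']['alias']          # 'w01'
    print config['queues']                   # {'default': '0', 'mail': '0'} (values stay strings)
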
/ztq_console/ztq_console/utils/models.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 | from pyramid.security import Allow, Everyone
3 |
4 |
5 | class RootFactory(object):
6 | __acl__ = [ (Allow, Everyone, 'view'),
7 | (Allow, 'group:editors', 'edit') ]
8 | def __init__(self, request):
9 | pass
10 |
--------------------------------------------------------------------------------
/ztq_demo/test_batch.py:
--------------------------------------------------------------------------------
1 | # encoding: utf-8
2 | # my_send.py
3 | import time
4 | import ztq_core
5 | from ztq_demo.tasks import index
6 |
7 | ztq_core.setup_redis('default','localhost', 6379, 3)
8 |
9 | index('only one')
10 |
11 | time.sleep(3)
12 |
13 | for i in range(8):
14 | index('data %d' % i)
15 |
--------------------------------------------------------------------------------
/ztq_demo/worker.ini:
--------------------------------------------------------------------------------
1 | [server]
2 | host = localhost
3 | port = 6379
4 | db = 3
5 | alias = w01
6 | active_config = false
7 | modules = ztq_demo.tasks
8 |
9 | [log]
10 | key = ztq_worker
11 | handler_file = ./ztq_worker.log
12 | level = ERROR
13 |
14 | [queues]
15 | default = 0
16 | mail = 0
17 | #index = 0
18 |
--------------------------------------------------------------------------------
/ztq_demo/test_callback.py:
--------------------------------------------------------------------------------
1 | # encoding: utf-8
2 | # my_send.py
3 | import time
4 | import ztq_core
5 | import transaction
6 | from ztq_demo.tasks import send
7 |
8 | ztq_core.setup_redis('default','localhost', 6379, 3)
9 |
10 | callback = ztq_core.prepare_task(send, 'yes, callback!!')
11 | send('see callback?', ztq_callback=callback)
12 |
13 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.py[co]
2 |
3 | # Packages
4 | *.egg
5 | *.egg-info
6 | dist
7 | build
8 | eggs
9 | parts
10 | bin
11 | var
12 | sdist
13 | develop-eggs
14 | .installed.cfg
15 |
16 | # Installer logs
17 | pip-log.txt
18 |
19 | # Unit test / coverage reports
20 | .coverage
21 | .tox
22 |
23 | #Translations
24 | *.mo
25 |
26 | #Mr Developer
27 | .mr.developer.cfg
28 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/tests.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 |
3 | from paste.httpserver import serve
4 | import sys,os
5 | sys.path.insert(0, os.path.abspath('../'))
6 | sys.path.append('E:\\workspace\\Everydo_DBank\\src\\fts\\ztq_core')
7 |
8 | if __name__ == '__main__':
9 | # For Debug
10 | from ztq_console import main
11 | app = main('test')
12 | serve(app, host='0.0.0.0', port=9013)
13 |
14 |
--------------------------------------------------------------------------------
/ztq_demo/test_ping.py:
--------------------------------------------------------------------------------
1 | import time
2 | import ztq_core
3 | from ztq_demo.tasks import send
4 |
5 | ztq_core.setup_redis('default','localhost', 6379, 3)
6 |
7 | send('hello, world 1')
8 | send('hello, world 2')
9 |
10 | print 'hello, world 1:'
11 | import pdb; pdb.set_trace()
12 | print ztq_core.ping_task(send, 'hello, world 1')
13 | print 'hello, world 2:'
14 | print ztq_core.ping_task(send, 'hello, world 2')
15 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/static/script.js:
--------------------------------------------------------------------------------
1 | function show_detail(obj){
2 | var pre = obj.firstChild;
3 | var content = pre.innerHTML;
4 | var openwindow = window.open("", "详细信息", "height=300, width=600, toolbar=no, menubar=no, scrollbars=yes, resizable=no, location=no, status=no");
5 | openwindow.document.write('<pre>' + content + '</pre>');
6 | openwindow.document.close()
7 | return null;
8 | }
9 |
--------------------------------------------------------------------------------
/ztq_worker/ztq_worker/system_info/get_mem_usage.vbs:
--------------------------------------------------------------------------------
1 | set objWMI = GetObject("winmgmts:\\.\root\cimv2")
2 | set colOS = objWMI.InstancesOf("Win32_OperatingSystem")
3 | for each objOS in colOS
4 | strReturn = round(objOS.TotalVisibleMemorySize / 1024) & vbCrLf & round(objOS.FreePhysicalMemory / 1024) & vbCrLf & Round(((objOS.TotalVisibleMemorySize-objOS.FreePhysicalMemory)/objOS.TotalVisibleMemorySize)*100)
5 | Wscript.Echo strReturn
6 | next
7 |
--------------------------------------------------------------------------------
/ztq_demo/test_transaction.py:
--------------------------------------------------------------------------------
1 | # encoding: utf-8
2 | # my_send.py
3 | import time
4 | import ztq_core
5 | import transaction
6 | from ztq_demo.tasks import send
7 |
8 | ztq_core.setup_redis('default','localhost', 6379, 3)
9 |
10 | ztq_core.enable_transaction(True)
11 |
12 | send('transaction send 1')
13 | send('transaction send 2')
14 |
15 | send('no transaction msg show first', ztq_transaction=False)
16 |
17 | print 'send, waiting for commit'
18 | time.sleep(5)
19 |
20 | transaction.commit()
21 | print 'committed'
22 |
23 |
--------------------------------------------------------------------------------
/ztq_console/buildout.cfg:
--------------------------------------------------------------------------------
1 | [buildout]
2 | newest = false
3 | index = http://pypi.douban.com/simple
4 | eggs-directory = /opt/buildout-cache/eggs
5 | find-links = http://www.pythonware.com/products/pil/
6 | http://distfiles.minitage.org/public/externals/minitage/
7 |
8 | develop = .
9 | ../ztq_core
10 |
11 | parts =
12 | app
13 |
14 | [app]
15 | recipe = zc.recipe.egg
16 | eggs = ztq_core
17 | Paste
18 | PasteDeploy
19 | PasteScript
20 | ztq_console
21 | WebOb >= 1.0
22 |
23 | interpreter = python
24 |
--------------------------------------------------------------------------------
/ztq_demo/test_fcallback.py:
--------------------------------------------------------------------------------
1 | # encoding: utf-8
2 | # my_send.py
3 | import time
4 | import ztq_core
5 | import transaction
6 | from ztq_demo.tasks import send, send_failed, failed_callback
7 |
8 | ztq_core.setup_redis('default','localhost', 6379, 3)
9 |
10 | callback = ztq_core.prepare_task(send, 'succeed!', ztq_queue='mail')
11 | fcallback = ztq_core.prepare_task(failed_callback)
12 |
13 | send('send a good msg, see what?',
14 | ztq_queue= 'mail',
15 | ztq_callback=callback,
16 | ztq_fcallback=fcallback)
17 |
18 | send_failed('send a failed msg, see what?',
19 | ztq_callback=callback,
20 | ztq_fcallback=fcallback)
21 |
22 |
--------------------------------------------------------------------------------
/ztq_console/setup.cfg:
--------------------------------------------------------------------------------
1 | [nosetests]
2 | match = ^test
3 | nocapture = 1
4 | cover-package = ztq_console
5 | with-coverage = 1
6 | cover-erase = 1
7 |
8 | [compile_catalog]
9 | directory = ztq_console/locale
10 | domain = ztq_console
11 | statistics = true
12 |
13 | [extract_messages]
14 | add_comments = TRANSLATORS:
15 | output_file = ztq_console/locale/ztq_console.pot
16 | width = 80
17 |
18 | [init_catalog]
19 | domain = ztq_console
20 | input_file = ztq_console/locale/ztq_console.pot
21 | output_dir = ztq_console/locale
22 |
23 | [update_catalog]
24 | domain = ztq_console
25 | input_file = ztq_console/locale/ztq_console.pot
26 | output_dir = ztq_console/locale
27 | previous = true
28 |
--------------------------------------------------------------------------------
/ztq_demo/tasks.py:
--------------------------------------------------------------------------------
1 | # encoding: utf-8
2 | from ztq_core import async
3 | import time
4 |
5 | @async
6 | def send(body):
7 | print 'START: ', body
8 | time.sleep(3)
9 | print 'END: ', body
10 |
11 | @async(queue='mail')
12 | def send_failed(body):
13 | print 'FAIL START:', body
14 | raise Exception('connection error...')
15 |
16 | @async(queue='mail')
17 | def failed_callback(return_code, return_msg):
18 | print 'FAILED CALLBACK:', return_code, return_msg
19 |
20 | @async(queue='index')
21 | def index(data):
22 | print 'INDEX:', data
23 | time.sleep(1)
24 |
25 | def do_commit():
26 | print 'COMMITTED'
27 |
28 | import ztq_worker
29 | ztq_worker.register_batch_queue('index', 5, do_commit)
30 |
--------------------------------------------------------------------------------
/ztq_core/ztq_core/utils.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 | from async import async
3 | import redis_wrap
4 | import urllib2
5 | from cron import has_cron, add_cron
6 |
7 | @async(queue='clock')
8 | def bgrewriteaof():
9 | """ 将redis的AOF文件压缩 """
10 | redis = redis_wrap.get_redis()
11 | redis.bgrewriteaof()
12 |
13 | def set_bgrewriteaof():
14 | # Automatically rewrite the Redis AOF file on a schedule
15 | if not has_cron(bgrewriteaof):
16 | add_cron({'hour':1}, bgrewriteaof)
17 |
18 | @async(queue='urlopen')
19 | def async_urlopen(url, params=None, timeout=120):
20 | try:
21 | # Convert unicode to UTF-8
22 | urllib2.urlopen(url.encode('utf-8'), params, timeout=timeout)
23 | except IOError:
24 | raise IOError('Could not connect to %s' % url)
25 |
26 |
--------------------------------------------------------------------------------
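A short sketch of wiring these helpers up (the Redis address is a placeholder; start_cron is exported by ztq_core):

    import ztq_core
    from ztq_core.utils import set_bgrewriteaof

    ztq_core.setup_redis('default', 'localhost', 6379, 0)
    set_bgrewriteaof()     # registers a daily cron entry ({'hour': 1}) for bgrewriteaof
    ztq_core.start_cron()  # starts the thread that pushes due cron jobs onto their queues
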
/ztq_console/ztq_console/templates/password.pt:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 | 设置登录密码
9 | 在这里,您可以设置控制台管理员的登陆密码:
10 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/ztq_core/ztq_core/__init__.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 | from redis_wrap import (
3 | get_redis,
4 | get_list,
5 | get_hash,
6 | get_set,
7 | setup_redis,
8 | get_key,
9 | set_key,
10 | get_queue,
11 | get_dict,
12 | ConnectionError,
13 | ResponseError
14 | )
15 |
16 | from task import (
17 | task_registry,
18 | register,
19 | push_task,
20 | has_task,
21 | pop_task,
22 | pop_error,
23 | push_runtime_error,
24 | gen_task,
25 | push_runtime_task
26 | )
27 |
28 | from model import *
29 |
30 | from cron import add_cron, has_cron, remove_cron, start_cron
31 |
32 | from async import async, enable_transaction, ping_task, prepare_task
33 |
34 |
--------------------------------------------------------------------------------
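A minimal producer-side sketch of this public API, mirroring ztq_demo/test_queue.py (host, port and db are placeholders):

    import ztq_core
    from ztq_core import async

    ztq_core.setup_redis('default', 'localhost', 6379, 3)

    @async(queue='mail')
    def send(body):
        print 'sending', body

    send('hello')                       # queued on 'mail'
    send('hello', ztq_queue='default')  # ztq_queue overrides the target queue per call
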
/ztq_core/test/test_list.py:
--------------------------------------------------------------------------------
1 | '''
2 | Created on 2011-4-18
3 |
4 | @author: Zay
5 | '''
6 | from ztq_core import get_redis, get_list, get_hash, get_set, get_dict, setup_redis, \
7 | get_key, set_key, get_queue
8 |
9 | def main():
10 | setup_redis('default', '192.168.209.128', 6380)
11 | get_redis(system='default').delete('list')
12 | message = 'hello'
13 |
14 | Test_list = get_list('list',serialized_type='string')
15 | Test_list.append(message)
16 |
17 | #Test_list.remove(message)
18 |
19 | print get_redis(system='default').lrem('list', 0, 'hello')
20 |
21 | Test_set = get_set('set',serialized_type='string')
22 | Test_set.add(message)
23 | print get_redis(system='default').srem('set', 'hello')
24 |
25 |
26 | if __name__ == '__main__':
27 | main()
28 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/utils/password.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 | from ConfigParser import ConfigParser, RawConfigParser
3 | import sys
4 | import os
5 |
6 | def get_password():
7 | cf = ConfigParser()
8 | cf.read(sys.argv[1])
9 | password_path = cf.get('password_path', 'password_path')
10 | cf.read(password_path)
11 | return cf.get('password', 'password')
12 |
13 | def modify_password(new_password):
14 | cf = ConfigParser()
15 | cf.read(sys.argv[1])
16 | password_path = cf.get('password_path', 'password_path')
17 | if os.path.exists(password_path):
18 | passwd_txt = ConfigParser()
19 | passwd_txt.read(password_path)
20 | else:
21 | passwd_txt = RawConfigParser()
22 | passwd_txt.add_section('password')
23 |
24 | passwd_txt.set('password', 'password', new_password)
25 | with open(password_path, 'w') as password_file:
26 | passwd_txt.write(password_file)
27 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/templates/mainpage.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | {{ title }}
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/templates/login.pt:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 | 登录控制台
9 | 进入控制后台前,请输入登录用户和登录密码:
10 |
27 |
28 |
29 |
--------------------------------------------------------------------------------
/ztq_core/test/test.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 | import unittest
3 |
4 |
5 | import ztq_core
6 |
7 | def echo():
8 | print 'hello'
9 |
10 | class TestftsModel(unittest.TestCase):
11 | def setUp(self):
12 | ztq_core.setup_redis('default', '192.168.209.128', 6379)
13 | self.testmessage = {'test':'test'}
14 |
15 | def testJsonList(self):
16 | """Test queue connect
17 | """
18 | self.queue = ztq_core.get_task_queue('q01')
19 | self.queue.append(self.testmessage)
20 | revmessage = self.queue.pop()
21 | self.assertEqual(revmessage,self.testmessage)
22 |
23 | def _testRegister(self):
24 | """测试JobThread
25 | """
26 | ztq_core.task.register(echo)
27 | ztq_core.task.task_push(u'foo:echo', 'aaa', 'bb', c='bar')
28 | job_thread = ztq_core.task.JobThread('foo')
29 | job_thread.start()
30 |
31 | if __name__ == '__main__':
32 | unittest.main()
33 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (C) 2012 Everydo
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
--------------------------------------------------------------------------------
/ztq_console/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (C) 2012 Everydo
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/templates/menu.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
13 |
26 |
27 |
--------------------------------------------------------------------------------
/ztq_core/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (C) 2012 Everydo
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
--------------------------------------------------------------------------------
/ztq_worker/LICENSE:
--------------------------------------------------------------------------------
1 | Copyright (C) 2012 Everydo
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
--------------------------------------------------------------------------------
/ztq_demo/test5.py:
--------------------------------------------------------------------------------
1 | # encoding: utf-8
2 | # my_send.py
3 | import time
4 | import ztq_core
5 | import transaction
6 | from ztq_demo.tasks import send, send_failed, failed_callback
7 |
8 | ztq_core.setup_redis('default','localhost', 6379, 3)
9 |
10 | #send('hello, world 1')
11 | #send('hello, world 2')
12 | #send('hello, world 3')
13 |
14 |
15 |
16 | #send('hello, world 3', ztq_queue='mail')
17 |
18 |
19 |
20 | #ztq_core.enable_transaction(True)
21 | #send('send 1')
22 | #send('send 2')
23 | #print 'send, waitting for commit'
24 | #time.sleep(5)
25 | #transaction.commit()
26 | #print 'committed'
27 |
28 |
29 |
30 |
31 | #ztq_core.enable_transaction(True)
32 | #send('transaction msg show later')
33 | #send('no transaction msg show first', ztq_transaction=False)
34 | #transaction.commit()
35 |
36 |
37 | #ztq_core.enable_transaction(False)
38 | #callback = ztq_core.prepare_task(send, 'yes, callback!!')
39 | #send('see callback?', ztq_callback=callback)
40 |
41 | #fc = ztq_core.prepare_task(failed_callback)
42 | #send_failed('send a failed msg, see failed callback?', ztq_fcallback=fc)
43 |
44 |
--------------------------------------------------------------------------------
/ztq_console/app.ini:
--------------------------------------------------------------------------------
1 | [app:ztq_console]
2 | use = egg:ztq_console
3 | reload_templates = true
4 | debug_authorization = false
5 | debug_notfound = false
6 | debug_routematch = false
7 | debug_templates = false
8 | default_locale_name = en
9 | redis_db = 1
10 | redis_port = 6379
11 | redis_host = 127.0.0.1
12 |
13 | [filter:weberror]
14 | use = egg:WebError#error_catcher
15 | debug = true
16 |
17 | [pipeline:main]
18 | pipeline =
19 | egg:WebError#evalerror
20 | ztq_console
21 |
22 | [server:main]
23 | use = egg:Paste#http
24 | host = 0.0.0.0
25 | port = 9001
26 |
27 | # Begin logging configuration
28 |
29 | [loggers]
30 | keys = root, ztq_console
31 |
32 | [handlers]
33 | keys = console
34 |
35 | [formatters]
36 | keys = generic
37 |
38 | [logger_root]
39 | level = INFO
40 | handlers = console
41 |
42 | [logger_ztq_console]
43 | level = DEBUG
44 | handlers =
45 | qualname = ztq_console
46 |
47 | [handler_console]
48 | class = StreamHandler
49 | args = (sys.stderr,)
50 | level = NOTSET
51 | formatter = generic
52 |
53 | [formatter_generic]
54 | format = %(asctime)s %(levelname)-5.5s [%(name)s][%(threadName)s] %(message)s
55 | # End logging configuration
56 |
57 |
--------------------------------------------------------------------------------
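For reference, a sketch of serving this application directly, following the pattern in ztq_console/ztq_console/tests.py (this bypasses the WebError pipeline defined above; host and port follow [server:main]):

    from paste.httpserver import serve
    from ztq_console import main

    app = main({}, redis_host='127.0.0.1', redis_port='6379', redis_db='1')
    serve(app, host='0.0.0.0', port=9001)
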
/ztq_core/setup.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 | from setuptools import setup, find_packages
4 |
5 | here = os.path.abspath(os.path.dirname(__file__))
6 | README = open(os.path.join(here, 'README.txt')).read()
7 | CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
8 |
9 | requires = [ 'redis>=2.4.9', 'transaction',]
10 |
11 | if not os.sys.platform.startswith('win'):
12 | requires.append('hiredis')
13 |
14 | setup(name='ztq_core',
15 | version = '1.2.5',
16 | author="edo",
17 | author_email="service@everydo.com",
18 | url="http://everydo.com/",
19 | description=u"Zopen Task Queue Core",
20 | long_description=README + '\n\n' + CHANGES,
21 | packages=find_packages(),
22 | license = "MIT",
23 | platforms=["Any"],
24 | keywords='Everydo queue ztq_core async',
25 | classifiers = [
26 | 'Development Status :: 4 - Beta',
27 | 'Environment :: Web Environment',
28 | 'Intended Audience :: Developers',
29 | 'Programming Language :: Python',
30 | 'Operating System :: OS Independent',
31 | 'Topic :: Internet :: WWW/HTTP',
32 | ],
33 | install_requires = requires,
34 | )
35 |
36 |
--------------------------------------------------------------------------------
/ztq_worker/ztq_worker/job_thread_manager.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 |
3 | from job_thread import JobThread
4 | import sys
5 |
6 | class JobThreadManager:
7 | """ 管理工作线程, 开启/停止工作线程 """
8 | # 保存工作线程的信息
9 | # threads = {'thread-1':, 'thread-2':}
10 | threads = {}
11 |
12 | def add(self, queue_name, sleep_time, from_right=True):
13 | """ 开启一个工作线程 """
14 | job_thread = JobThread(queue_name, sleep_time, from_right)
15 | job_thread.setDaemon(True)
16 | job_thread.start()
17 | self.threads[job_thread.getName()] = job_thread
18 | sys.stdout.write(
19 | 'start a job thread, name: %s,'
20 | ' ident: %s,'
21 | ' queue_name: %s\n'
22 | % (job_thread.getName(), job_thread.ident, queue_name)
23 | )
24 |
25 | def stop(self, job_name):
26 | """ 安全的停止一个工作线程
27 | 正在转换中的时候,会等待转换完成后自动退出
28 | """
29 | if not job_name in self.threads:
30 | return
31 | #sys.stdout.write('stop %s job thread\n'% job_name)
32 | job_thread = self.threads[job_name]
33 | job_thread.stop()
34 | del self.threads[job_name]
35 |
--------------------------------------------------------------------------------
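A small illustrative sketch of driving the manager (queue name and sleep time are placeholders; JobThread itself lives in job_thread.py, not shown here):

    from ztq_worker.job_thread_manager import JobThreadManager

    manager = JobThreadManager()
    manager.add('default', sleep_time=1)  # start a daemon thread consuming the 'default' queue
    for name in list(manager.threads):
        manager.stop(name)                # each thread exits once its current job finishes
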
/ztq_worker/ztq_worker/config_manager.py:
--------------------------------------------------------------------------------
1 | # -*- encoding: utf-8 -*-
2 |
3 | import os
4 |
5 | from ConfigParser import RawConfigParser as ConfigParser
6 | from system_info import get_ip
7 |
8 | # Read the configuration file (worker.ini) into CONFIG; the rest of the code reads settings from CONFIG
9 | CONFIG = {'server':{'alias':get_ip()},
10 | 'queues':{} }
11 |
12 | def read_config_file(location=None):
13 | """ 初始化配置管理
14 | """
15 | cfg = ConfigParser()
16 | if location:
17 | cfg.read(location)
18 | else:
19 | local_dir = os.path.dirname(os.path.realpath(__file__))
20 | cfg.read( os.path.join(local_dir, 'config.cfg') )
21 |
22 | global CONFIG
23 | for section in cfg.sections():
24 | CONFIG[section] = {}
25 | for option in cfg.options(section):
26 | CONFIG[section][option] = cfg.get(section, option)
27 | return CONFIG
28 |
29 |
30 | def register_batch_queue(queue_name, batch_size, batch_func=None):
31 | """ 注册队列是批处理模式
32 | queue_name: 指定哪个队列为批处理模式
33 | batch_size: 整形
34 | batch_func: 方法对象
35 |
36 | 可以让队列在完成batch_size 后,执行一次 batch_func
37 | """
38 | CONFIG.setdefault('batch_queue', {}).update(
39 | {queue_name:{'batch_size':batch_size, 'batch_func':batch_func}})
40 |
41 |
--------------------------------------------------------------------------------
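register_batch_queue is how ztq_demo/tasks.py marks its 'index' queue as a batch queue; a trimmed version of that usage:

    import ztq_worker

    def do_commit():
        print 'COMMITTED'

    # After every 5 'index' jobs the worker calls do_commit() once.
    ztq_worker.register_batch_queue('index', 5, do_commit)
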
/ztq_console/ztq_console/utils/dispatch.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 | """
3 | Description: scheduling module; dispatches work according to server and queue weights.
4 | """
5 |
6 | import time
7 | import ztq_core
8 |
9 | def update_queue_threads(worker_name, queue_name, action):
10 | """调整特定队列线程数量,可以增加或者减少
11 | """
12 | worker_config = ztq_core.get_worker_config()
13 | queue_config = worker_config[worker_name]
14 | if queue_config.get(queue_name, None) is not None:
15 | _config = queue_config[queue_name]
16 |
17 | # Build the new configuration
18 | if action == 'queue_down':
19 | _config.pop()
20 | elif action == 'queue_up' :
21 | _config.append({u'interval': 0})
22 | queue_config[queue_name] = _config
23 |
24 | worker_config[worker_name]= queue_config
25 | send_sync_command(worker_name)
26 |
27 | def send_sync_command(worker_name):
28 | """向转换器下达同步指令
29 | """
30 | sync_command= {'command':'updateworker','timestamp':int(time.time())}
31 | cmd_queue = ztq_core.get_command_queue(worker_name)
32 | # Avoid sending multiple sync commands at the same time
33 | if cmd_queue:
34 | for command in cmd_queue:
35 | if command.get('command', None) == sync_command['command']:
36 | return 0
37 | cmd_queue.push(sync_command)
38 |
39 |
40 |
--------------------------------------------------------------------------------
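A hedged example of calling this module from console code (worker and queue names are placeholders; the worker/command queue helpers come from ztq_core.model, which is not shown here):

    from ztq_console.utils.dispatch import update_queue_threads

    # Add one more thread for the 'default' queue on worker 'w01';
    # the helper then queues an 'updateworker' command for that worker.
    update_queue_threads('w01', 'default', 'queue_up')
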
/ztq_worker/setup.py:
--------------------------------------------------------------------------------
1 | import os
2 | from setuptools import setup
3 |
4 | here = os.path.abspath(os.path.dirname(__file__))
5 | README = open(os.path.join(here, 'README.txt')).read()
6 | CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
7 |
8 | setup (
9 | name='ztq_worker',
10 | version='1.3.2',
11 | author = "xutaozhe",
12 | author_email = "xutaozhe@zopen.cn",
13 | description=u"Zopen Task Queue Worker",
14 | long_description=README + '\n\n' + CHANGES,
15 | license = "MIT",
16 | keywords='Everydo queue async ztq_worker',
17 | classifiers = [
18 | 'Development Status :: 4 - Beta',
19 | 'Environment :: Web Environment',
20 | 'Intended Audience :: Developers',
21 | 'Programming Language :: Python',
22 | 'Operating System :: OS Independent',
23 | 'Topic :: Internet :: WWW/HTTP',
24 | ],
25 | packages = ['ztq_worker'],
26 | #package_dir={'ztq_worker': 'ztq_worker'},
27 | #package_data={'ztq_worker': ['system_info/*.vbs'] },
28 | data_files=[('config', ['worker.ini']),],
29 | include_package_data = True,
30 | install_requires = [
31 | "ztq_core",
32 | "psutil",
33 | ],
34 | entry_points = """\
35 | [console_scripts]
36 | ztq_worker = ztq_worker.main:run
37 | """,
38 | zip_safe = False,
39 | )
40 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/templates/top.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
33 |
34 |
35 |
--------------------------------------------------------------------------------
/ztq_core/ztq_core/demo.py:
--------------------------------------------------------------------------------
1 | # encoding: utf-8
2 | # my_send.py
3 | from ztq_core import async
4 | import ztq_worker
5 | import time
6 |
7 | @async(queue='mail')
8 | def send(body):
9 | print 'START: ', body
10 | time.sleep(5)
11 | print 'END: ', body
12 |
13 | @async(queue='mail')
14 | def send2(body):
15 | print 'START2 … ', body
16 | raise Exception('connection error')
17 |
18 | @async(queue='mail')
19 | def call_process(filename):
20 | print 'call process:', filename
21 | ztq_worker.report_job(12323, comment=filename)
22 | time.sleep(20)
23 |
24 | @async(queue='mail')
25 | def fail_callback(return_code, return_msg):
26 | print 'failed, now in failed callback'
27 | print return_code, return_msg
28 |
29 | def test():
30 | import ztq_core
31 | import transaction
32 |
33 | from ztq_core import demo
34 |
35 | ztq_core.setup_redis('default','localhost', 6379, 1)
36 |
37 | demo.send('*' * 40, ztq_queue='mail')
38 |
39 | demo.send('transaction will on', ztq_queue='mail')
40 | ztq_core.enable_transaction(True)
41 |
42 | demo.send('transaction msg show later')
43 | demo.send('no transaction msg show first', ztq_transaction=False)
44 | time.sleep(5)
45 | transaction.commit()
46 |
47 | ztq_core.enable_transaction(False)
48 |
49 | demo.send('transaction off')
50 | callback = ztq_core.prepare_task(demo.send, 'yes, callback!!')
51 | demo.send('see callback?', ztq_callback=callback)
52 |
53 | ff = ztq_core.prepare_task(demo.fail_callback)
54 | demo.send2('send a failed msg, see failed callback?', ztq_fcallback=ff)
55 |
56 | call_process('saa.exe')
57 |
58 | if __name__ == '__main__':
59 | test()
60 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/templates/syslog.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 | 最近{{sindex}} ~ {{eindex}}条服务器运行如下:
9 |
10 |
11 |
12 |
13 | 工作端
14 | 服务器IP
15 | 报告时间
16 | 状态
17 |
18 |
19 |
20 | {% if sys_log %}
21 | {% for log in sys_log %}
22 |
23 | {{ log['alias'] }}
24 | {{ log['host'] }}
25 | {{ log['_timestamp'] }}
26 | {{ log['type'] }}
27 |
28 | {% endfor %}
29 | {% endif %}
30 |
31 |
32 |
33 |
34 | {% if fpage %}
35 | 上一页
36 | {% else %}
37 | 上一页
38 | {% endif %}
39 |
40 | < {{npage-1}} >
41 |
42 | 下一页
43 |
44 |
45 |
46 |
47 |
48 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/templates/jobs.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | {{ queue_name }}:队列信息
10 |
11 |
12 |
13 |
14 | 提交时间
15 | 任务信息
16 | 操作
17 |
18 |
19 |
20 | {% if jobs %}
21 | {% for job in jobs %}
22 |
23 | {{ job['_created'] }}
24 |
25 | {{ job['_detail'] }} 详细信息
26 |
27 |
28 | 优先处理
29 | 删除
30 |
31 |
32 | {% endfor %}
33 | {% endif %}
34 |
35 |
36 |
37 |
38 |
39 |
--------------------------------------------------------------------------------
/ztq_worker/ztq_worker/system_info/win.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | import subprocess as sp
3 | import urllib
4 | from os import path
5 |
6 | LOCAL_DIR = path.dirname(path.realpath(__file__))
7 |
8 | def get_cpu_usage():
9 | vbs_file = 'get_cpu_usage.vbs'
10 | vbs_path = path.join(LOCAL_DIR, vbs_file)
11 | popen = sp.Popen('cscript /nologo %s'%vbs_path, stdout=sp.PIPE, shell=True)
12 | popen.wait()
13 | result = popen.stdout.read()
14 | return '%s%%'%result.strip()
15 |
16 | def get_mem_usage():
17 | vbs_file = 'get_mem_usage.vbs'
18 | vbs_path = path.join(LOCAL_DIR, vbs_file)
19 | popen = sp.Popen('cscript /nologo %s'%vbs_path, stdout=sp.PIPE, shell=True)
20 | popen.wait()
21 | result = popen.stdout.read()
22 | mem_total, mem_usage, mem_percent = result.split()
23 | return ( '%s%%'%mem_percent, '%sM'%mem_total )
24 |
25 | _CPU_STYLE = None
26 | def get_cpu_style():
27 | global _CPU_STYLE
28 | if _CPU_STYLE is None:
29 | vbs_file = 'get_cpu_style.vbs'
30 | vbs_path = path.join(LOCAL_DIR, vbs_file)
31 | popen = sp.Popen('cscript /nologo %s'%vbs_path, stdout=sp.PIPE, shell=True)
32 | popen.wait()
33 | result = popen.stdout.read()
34 | cpu_style = '%s'%result.strip()
35 |
36 | try:
37 | cpu_style = cpu_style.decode('gb18030')
38 | except UnicodeDecodeError:
39 | cpu_style = cpu_style.decode('utf8')
40 |
41 | _CPU_STYLE = cpu_style.encode('utf8')
42 |
43 | return _CPU_STYLE
44 |
45 | def get_ip():
46 | return urllib.thishost()
47 |
48 | if __name__ == '__main__':
49 | print 'cpu style: %s' % get_cpu_style()
50 | print 'cpu usage: %s' % get_cpu_usage()
51 | print 'memory usage: %s, memory total: %s' % get_mem_usage()
52 | print 'local ip addrs: %s'%get_ip()
53 |
--------------------------------------------------------------------------------
/ztq_console/setup.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from setuptools import setup
4 |
5 | here = os.path.abspath(os.path.dirname(__file__))
6 | README = open(os.path.join(here, 'README.txt')).read()
7 | CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
8 |
9 | requires = ['pyramid==1.4.5', 'WebError', 'pyramid_jinja2==1.9','ztq_core' ]
10 |
11 | setup(name='ztq_console',
12 | version='1.2.5',
13 | description='Zopen Task Queue Console',
14 | long_description=README + '\n\n' + CHANGES,
15 | license = "MIT",
16 | author='edo',
17 | author_email="service@everydo.com",
18 | url="http://everydo.com/",
19 | keywords='Everydo queue monitor console async',
20 | packages=['ztq_console'],
21 | package_dir={'ztq_console': 'ztq_console'},
22 | package_data={'ztq_console': ['templates/*.html',
23 | 'static/*.js',
24 | 'static/*.css',
25 | 'static/images/*.gif',
26 | 'static/images/*.jpg',
27 | 'utils/*.py',
28 | ]},
29 | data_files=[('config', ['app.ini']),],
30 | include_package_data=True,
31 | zip_safe=False,
32 | install_requires=requires,
33 | tests_require=requires,
34 | test_suite="ztq_console",
35 | classifiers = [
36 | 'Development Status :: 4 - Beta',
37 | 'Environment :: Web Environment',
38 | 'Intended Audience :: Developers',
39 | 'Programming Language :: Python',
40 | 'Operating System :: OS Independent',
41 | 'Topic :: Internet :: WWW/HTTP',
42 | 'Framework :: Pyramid',
43 | ],
44 | entry_points = """\
45 | [paste.app_factory]
46 | main = ztq_console:main
47 | """,
48 | paster_plugins=['pyramid'],
49 | )
50 |
51 |
--------------------------------------------------------------------------------
/ztq_worker/ztq_worker/buffer_thread.py:
--------------------------------------------------------------------------------
1 | # -*- encoding: utf-8 -*-
2 | import threading
3 | import time
4 |
5 | import ztq_core
6 |
7 | class BufferThread(threading.Thread):
8 |
9 | def __init__(self, config):
10 | """ cofnig: {'job-0':{'thread_limit': 50},,,}
11 | """
12 | super(BufferThread, self).__init__()
13 | self.config = config
14 | self._stop = False
15 |
16 | def run(self):
17 |
18 | if not self.config: return
19 |
20 | while not self._stop:
21 | for buffer_name, buffer_config in self.config.items():
22 |
23 | # Stop requested
24 | if self._stop: return
25 |
26 | self.buffer_queue = ztq_core.get_buffer_queue(buffer_name)
27 | self.task_queue = ztq_core.get_task_queue(buffer_name)
28 | self.buffer_name = buffer_name
29 | self.task_queue_limit = int(buffer_config['thread_limit'])
30 |
31 | while True:
32 | try:
33 | self.start_job()
34 | break
35 | except ztq_core.ConnectionError:
36 | time.sleep(3)
37 |
38 | time.sleep(1)
39 |
40 | def start_job(self):
41 | over_task_limit = self.task_queue_limit - len(self.task_queue)
42 |
43 | # The server may have died before this task was pushed, so push it again
44 | if getattr(self, 'buffer_task', None):
45 | self.push_buffer_task()
46 |
47 | # The corresponding job threads are busy
48 | if over_task_limit <= 0:
49 | return
50 |
51 | # Not busy: fill the task queue up
52 | else:
53 | while over_task_limit > 1:
54 |
55 | # Stop requested
56 | if self._stop: return
57 |
58 | # Fetch a task
59 | self.buffer_task = self.buffer_queue.pop(timeout=-1)
60 | if self.buffer_task is None:
61 | return
62 |
63 | self.push_buffer_task()
64 |
65 | self.buffer_task = None
66 | over_task_limit -= 1
67 |
68 | def push_buffer_task(self):
69 | if 'runtime' not in self.buffer_task:
70 | self.buffer_task['runtime'] = {}
71 |
72 | ztq_core.push_runtime_task(self.buffer_name, self.buffer_task)
73 |
74 | def stop(self):
75 | self._stop = True
76 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/templates/main.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | 下面是各个工作端的当前工作情况:
8 |
9 |
10 |
11 |
12 | 名字
13 | IP
14 | CPU
15 | 占用
16 | 内存
17 | 上次启动时间
18 | 上报时间
19 | 当前工作
20 | 状态
21 | 操作
22 |
23 |
24 |
25 | {% if workers %}
26 | {% for worker in workers %}
27 |
28 | {{ worker['_worker_name'] }}
29 | {{ worker['ip'] }}
30 | {{ worker['cpu_style'] }}
31 | {{ worker['cpu_percent'] }}
32 | 共 {{ worker['mem_total'] }} | 已用 {{ worker['mem_percent'] }}
33 | {{ worker['_started'] }}
34 | {% if worker['_active'] == 'work' %}
35 | {{ worker['_timestamp'] }}
36 |
37 | {% for thread in worker['_threads'] %}{{thread['_name']}}: {{thread['_comment']}}. {{thread['_take_time']}}秒前 Kill {% endfor %}
38 | {{ worker['_active'] }}
39 | 停止
40 | {% else %}
41 | {{ worker['_timestamp'] }}
42 |
43 | {{ worker['_active'] }}
44 |
45 | 删除
46 | 启用
47 |
48 |
49 | {% endif %}
50 |
51 | {% endfor %}
52 | {% endif %}
53 |
54 |
55 |
56 |
57 |
58 |
--------------------------------------------------------------------------------
/ztq_console/bootstrap.py:
--------------------------------------------------------------------------------
1 | ##############################################################################
2 | #
3 | # Copyright (c) 2006 Zope Corporation and Contributors.
4 | # All Rights Reserved.
5 | #
6 | # This software is subject to the provisions of the Zope Public License,
7 | # Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
8 | # THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
9 | # WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
10 | # WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
11 | # FOR A PARTICULAR PURPOSE.
12 | #
13 | ##############################################################################
14 | """Bootstrap a buildout-based project
15 |
16 | Simply run this script in a directory containing a buildout.cfg.
17 | The script accepts buildout command-line options, so you can
18 | use the -c option to specify an alternate configuration file.
19 |
20 | $Id$
21 | """
22 |
23 | import os, shutil, sys, tempfile, urllib2
24 |
25 | tmpeggs = tempfile.mkdtemp()
26 |
27 | is_jython = sys.platform.startswith('java')
28 |
29 | try:
30 | import pkg_resources
31 | except ImportError:
32 | ez = {}
33 | exec urllib2.urlopen('http://peak.telecommunity.com/dist/ez_setup.py'
34 | ).read() in ez
35 | ez['use_setuptools'](to_dir=tmpeggs, download_delay=0)
36 |
37 | import pkg_resources
38 |
39 | if sys.platform == 'win32':
40 | def quote(c):
41 | if ' ' in c:
42 | return '"%s"' % c # work around spawn lamosity on windows
43 | else:
44 | return c
45 | else:
46 | def quote (c):
47 | return c
48 |
49 | cmd = 'from setuptools.command.easy_install import main; main()'
50 | ws = pkg_resources.working_set
51 |
52 | if is_jython:
53 | import subprocess
54 |
55 | assert subprocess.Popen([sys.executable] + ['-c', quote(cmd), '-mqNxd',
56 | quote(tmpeggs), 'zc.buildout'],
57 | env=dict(os.environ,
58 | PYTHONPATH=
59 | ws.find(pkg_resources.Requirement.parse('setuptools')).location
60 | ),
61 | ).wait() == 0
62 |
63 | else:
64 | assert os.spawnle(
65 | os.P_WAIT, sys.executable, quote (sys.executable),
66 | '-c', quote (cmd), '-mqNxd', quote (tmpeggs), 'zc.buildout',
67 | dict(os.environ,
68 | PYTHONPATH=
69 | ws.find(pkg_resources.Requirement.parse('setuptools')).location
70 | ),
71 | ) == 0
72 |
73 | ws.add_entry(tmpeggs)
74 | ws.require('zc.buildout')
75 | import zc.buildout.buildout
76 | zc.buildout.buildout.main(sys.argv[1:] + ['bootstrap'])
77 | shutil.rmtree(tmpeggs)
78 |
--------------------------------------------------------------------------------
/ztq_worker/ztq_worker/system_info/linux.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # encoding: utf-8
3 |
4 | from __future__ import with_statement
5 | import subprocess as sp
6 | import psutil
7 |
8 | def get_cpu_usage():
9 | cpu_usage = psutil.cpu_percent()
10 |
11 | return '%.1f%%'%cpu_usage
12 |
13 | def get_mem_usage():
14 | with open('/proc/meminfo') as mem_info:
15 | mem_total = mem_info.readline()
16 | mem_free = mem_info.readline()
17 | mem_buff = mem_info.readline()
18 | # Some kernels insert a MemAvailable line, so if this line is not Buffers, read the next line instead.
19 | if not mem_buff.startswith('Buffers'):
20 | mem_buff = mem_info.readline()
21 |
22 | mem_cached = mem_info.readline()
23 |
24 | mem_total = int(mem_total.split(':', 1)[1].split()[0])
25 | mem_free = int(mem_free.split(':', 1)[1].split()[0])
26 | mem_buff = int(mem_buff.split(':', 1)[1].split()[0])
27 | mem_cached = int(mem_cached.split(':', 1)[1].split()[0])
28 |
29 | mem_usage = mem_total - (mem_free + mem_buff + mem_cached)
30 | mem_usage = 1.0 * 100 * mem_usage / mem_total
31 |
32 | return ( '%.1f%%'%mem_usage, '%dM'%(mem_total/1024) )
33 |
34 | _CPU_STYLE = None
35 | def get_cpu_style():
36 | global _CPU_STYLE
37 | if _CPU_STYLE is None:
38 | popen = sp.Popen('cat /proc/cpuinfo | grep "model name" | head -n 1', stdout=sp.PIPE, shell=True)
39 | popen.wait()
40 | result = popen.stdout.read()
41 | _CPU_STYLE = result.split(':', 1)[1].strip()
42 | return _CPU_STYLE
43 |
44 | _IP_ADDRESS = None
45 | def get_ip():
46 | def get_ip_address(ifname):
47 | s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
48 | return socket.inet_ntoa(fcntl.ioctl(
49 | s.fileno(),
50 | 0x8915, # SIOCGIFADDR
51 | struct.pack('256s', ifname[:15])
52 | )[20:24])
53 |
54 | global _IP_ADDRESS
55 | if _IP_ADDRESS is None:
56 | import socket, fcntl, struct
57 | popen = sp.Popen("ifconfig -s | cut -d ' ' -f 1", stdout=sp.PIPE, shell=True)
58 | popen.wait()
59 | result = popen.stdout.read()
60 | for iface_name in result.split():
61 | if iface_name in ('Iface', 'lo'):
62 | continue
63 | try:
64 | _IP_ADDRESS = get_ip_address(iface_name)
65 | except:
66 | pass
67 | else:
68 | break
69 | if _IP_ADDRESS is None:
70 | _IP_ADDRESS = '127.0.0.1'
71 |
72 | return _IP_ADDRESS
73 |
74 | if __name__ == '__main__':
75 | print 'cpu style: %s' % get_cpu_style()
76 | print 'cpu usage: %s' % get_cpu_usage()
77 | print 'memory usage: %s, memory total: %s' % get_mem_usage()
78 | print 'local ip addrs: %s'%get_ip()
79 |
80 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/__init__.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 | from pyramid.config import Configurator
3 | import pyramid_jinja2
4 | import ztq_core
5 | import os
6 | from pyramid.authentication import AuthTktAuthenticationPolicy
7 | from pyramid.authorization import ACLAuthorizationPolicy
8 | from ztq_console.utils import models
9 | from ztq_console.utils.security import groupfinder
10 |
11 |
12 | def main(global_config, redis_host='127.0.0.1', redis_port='6379', \
13 | redis_db='0', frs_root='frs', \
14 | frs_cache='frscache', addon_config=None, work_enable=True, **settings):
15 | """ This function returns a Pyramid WSGI application.
16 | """
17 |
18 | # Initialize the Redis connection
19 | ztq_core.setup_redis('default', redis_host, port=int(redis_port), db=int(redis_db),)
20 |
21 | # # Start the backend service
22 | # Initialize the fts_web configuration
23 | authn_policy = AuthTktAuthenticationPolicy('sosecret', callback=groupfinder, hashalg='sha512')
24 | authz_policy = ACLAuthorizationPolicy()
25 | settings = dict(settings)
26 | settings.setdefault('jinja2.directories', 'ztq_console:templates')
27 | config = Configurator(settings=settings, root_factory='ztq_console.utils.models.RootFactory')
28 | config.set_authentication_policy(authn_policy)
29 | config.set_authorization_policy(authz_policy)
30 | config.begin()
31 | config.add_renderer('.html', pyramid_jinja2.renderer_factory)
32 | config.add_static_view('static', 'ztq_console:static')
33 | config.scan('ztq_console.views')
34 | config.add_route('login', '/login')
35 | config.add_route('logout', '/logout')
36 | config.add_route('password', '/password' )
37 | config.add_route('worker', '/worker/{id}',
38 | view='ztq_console.views.config_worker')
39 | config.add_route('end_thread', '/worker/{id}/{thread}/{pid}',
40 | view='ztq_console.views.stop_working_job')
41 | config.add_route('taskqueue', '/taskqueues/{id}')
42 | config.add_route('taskqueues_config', '/taskqueues/{id}/config',
43 | view='ztq_console.views.config_queue')
44 | config.add_route('taskqueue_action', '/taskqueues_action/{id}')
45 | config.add_route('errorqueues_job', '/errorqueues/{id}/job',
46 | view='ztq_console.views.error_jobs_handler')
47 | config.add_route('workerlog', '/workerlog/{page}')
48 | config.add_route('syslog', '/syslog/{page}')
49 | config.add_route('errorlog', '/errorlog/{page}')
50 | config.add_route('errorqueue', '/errorqueue/{id}/{page}')
51 | config.add_route('redo_all_error_for_queue', '/redo_all_error_for_queue/{id}')
52 | config.add_route('del_all_error_for_queue', '/del_all_error_for_queue/{id}')
53 | if addon_config is not None:
54 | addon_config(config)
55 | config.end()
56 |
57 | return config.make_wsgi_app()
58 |
59 |
--------------------------------------------------------------------------------
/ztq_core/ztq_core/cron.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 |
3 | """ 有一个定时执行的list: ztq:list:cron
4 |
5 | 放进去的工作,会定期自动执行
6 | """
7 | from threading import Thread
8 | import datetime
9 | import time
10 | import model
11 | from task import split_full_func_name, push_task
12 |
13 | CRON_RUNNING = False
14 |
15 | def has_cron(func):
16 | if type(func) == str:
17 | func_name = func
18 | else:
19 | func_name = func.__raw__.__name__
20 | for cron in model.get_cron_set():
21 | if cron['func_name'] == func_name:
22 | return True
23 | return False
24 |
25 | def add_cron(cron_info, full_func, *args, **kw):
26 | """ 定时执行
27 |
28 | cron_info: {'minute':3, 'hour':3,}
29 | """
30 | cron_set = model.get_cron_set()
31 | if type(full_func) == str:
32 | queue_name, func_name = split_full_func_name(full_func)
33 | else:
34 | queue_name = full_func._ztq_queue
35 | func_name = full_func.__raw__.__name__
36 | cron_set.add({'func_name':func_name,
37 | 'cron_info':cron_info,
38 | 'queue': queue_name,
39 | 'args':args,
40 | 'kw':kw})
41 |
42 | def remove_cron(func):
43 | cron_set = model.get_cron_set()
44 | if type(func) == str:
45 | func_name = func
46 | else:
47 | func_name = func.__raw__.__name__
48 | for cron in cron_set:
49 | if cron['func_name'] == func_name:
50 | cron_set.remove(cron)
51 |
52 | class CronThread(Thread):
53 | """ 定时检查cron列表,如果满足时间条件,放入相关的队列 """
54 | def __init__(self):
55 | super(CronThread, self).__init__()
56 |
57 | def run(self):
58 | """
59 |         cron_info format: {'minute':3, 'hour':3,}
60 | """
61 | cron_set = model.get_cron_set()
62 | while True:
63 |             # Walk the cron set and check whether each entry is due
64 | for cron in cron_set:
65 | execute_flag = self.check_cron_info(cron['cron_info'])
66 | if execute_flag:
67 | push_task(cron['queue'] + ':' + cron['func_name'], *cron['args'], **cron['kw'])
68 |
69 | time.sleep(55)
70 |
71 | def check_cron_info(self, cron_info):
72 | """检查定时执行信息是否满足条件
73 | """
74 | time_now = datetime.datetime.now()
75 | hour_cron = int(cron_info.get('hour', -1))
76 | if hour_cron != -1:
77 | hour_now = int(time_now.hour)
78 | if hour_now != hour_cron:
79 | return False
80 |
81 | minute_cron = int(cron_info.get('minute', 0))
82 | minute_now = int(time_now.minute)
83 | if minute_cron != minute_now:
84 | return False
85 | return True
86 |
87 | def start_cron():
88 |     global CRON_RUNNING
89 | CRON_RUNNING = True
90 | cron_thread = CronThread()
91 | cron_thread.setDaemon(True)
92 | cron_thread.start()
93 |
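# Usage sketch (illustrative): `my_task` stands for a task registered through
# the ztq_core.async decorator, and a redis connection is assumed to have been
# configured via ztq_core.setup_redis beforehand.
#
#   from ztq_core.cron import has_cron, add_cron, start_cron
#
#   if not has_cron(my_task):
#       add_cron({'hour': 1, 'minute': 30}, my_task)   # run every day at 01:30
#   start_cron()   # daemon thread that pushes due jobs onto their queues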
--------------------------------------------------------------------------------
/ztq_worker/ztq_worker/command_thread.py:
--------------------------------------------------------------------------------
1 | # -*- encoding: utf-8 -*-
2 |
3 | from config_manager import CONFIG
4 | from command_execute import report, kill_transform, cancel_transform, set_job_threads
5 | from threading import Thread
6 | import logging
7 | import time
8 | import ztq_core
9 |
10 | logger = logging.getLogger("ztq_worker")
11 |
12 | class CommandThread(Thread):
13 | """ 监视命令队列,取得命令, 执行命令"""
14 |
15 | def __init__(self, worker_name=''):
16 | super(CommandThread, self).__init__()
17 | self.login_time = int(time.time())
18 | self.worker_name = worker_name or CONFIG['server']['alias']
19 |
20 | def init(self):
21 | """ 开机初始化工作 """
22 | reboot = False
23 | worker_state = ztq_core.get_worker_state()
24 | if self.worker_name in worker_state:
25 | # 重启,读取服务器配置信息
26 | reboot = True
27 | # 记录系统日志
28 | system_log = ztq_core.get_system_log_queue()
29 | system_log.push(dict( host=CONFIG['server']['alias'],
30 | alias=self.worker_name,
31 | type=reboot and 'reboot' or 'power',
32 | timestamp=self.login_time,))
33 | # 报告机器状态
34 | worker_state[self.worker_name] = report(self.login_time)
35 |
36 | def run(self):
37 | self.init()
38 | # 监听指令
39 | commands = ztq_core.get_command_queue(self.worker_name)
40 | while True:
41 | try:
42 | command = commands.pop()
43 | if command['command'] == 'report':
44 | worker_state = ztq_core.get_worker_state()
45 | worker_state[self.worker_name] = report(self.login_time)
46 | elif command['command'] == 'updatedriver':
47 | # TODO
48 | #async_drive_config()
49 | pass
50 | elif command['command'] == 'updateworker' and \
51 | CONFIG['server'].get('active_config', 'false').lower() == 'true':
52 | queue = ztq_core.get_worker_config()
53 | set_job_threads(queue[self.worker_name])
54 | elif command['command'] == 'kill':
55 | kill_transform(pid=command['pid'], timestamp=command['timestamp'])
56 | elif command['command'] == 'cancel':
57 | cancel_transform(pid=command['pid'], timestamp=command['timestamp'])
58 | except ztq_core.ConnectionError, e:
59 | logger.error('ERROR: redis command connection error: %s' % str(e))
60 | time.sleep(3)
61 | except ztq_core.ResponseError, e:
62 | logger.error('ERROR: redis command response error: %s' % str(e))
63 | time.sleep(3)
64 |
65 | except KeyboardInterrupt:
66 | import os
67 |                 # this actually triggers command_execute.clear_transform_thread, registered via atexit
68 | os.sys.exitfunc()
69 | os._exit(0)
70 | except Exception, e:
71 | logger.error('ERROR: redis command unknown error: %s' % str(e))
72 | time.sleep(3)
73 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/templates/workerlog.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | 最近{{sindex}} ~ {{eindex}}条工作历史如下:
10 |
11 |
12 |
13 |
14 | 状态
15 | 提交时间
16 | 服务器
17 | 任务名
18 | 开始时间
19 | 结束时间
20 | 备注
21 |
22 |
23 | 任务信息
24 |
25 |
26 |
27 | {% if worker_log %}
28 | {% for log in worker_log %}
29 |
30 |
31 | {% if log['_status'] %}
32 | 出错
33 | {% else %}
34 | 成功
35 | {% endif %}
36 |
37 | {{ log['_created'] }}
38 | {{ log['_server'] }}
39 | {{ log['_func'] }}
40 | {{ log['_start'] }}
41 | {{ log['_end'] }}
42 | {{ log['_comment'] }}
43 |
44 |
45 |
46 | {% if log['_file'] %}
47 | 文件: {{log['_file']}}
48 | {% endif %}
49 |
50 | {% if log['_reason'] %}
51 | {{ log['_reason'] }}
52 | {% endif %}
53 | {{ log['_detail'] }} 详细信息
54 |
55 |
56 | {% endfor %}
57 | {% endif %}
58 |
59 |
60 |
61 |
62 |
63 | {% if fpage %}
64 | 上一页
65 | {% else %}
66 | 上一页
67 | {% endif %}
68 |
69 | < {{npage-1}} >
70 |
71 | 下一页
72 |
73 |
74 |
75 |
76 |
77 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/static/style_login.css:
--------------------------------------------------------------------------------
1 | h1 { /* table elements 表格元素 */
2 | margin: 0;
3 | padding: 0;
4 | }
5 | h1 { font-size:24px; }
6 | h1 { font-weight:normal; }
7 |
8 | label {
9 | width: 150px;
10 | padding-top: 2px;
11 | display:inline-block;
12 | text-align: right;
13 | padding-right:10px;
14 | }
15 |
16 | .info {
17 | color:gray;
18 | font-size: 12px;
19 | }
20 |
21 | select {
22 | display: inline-block;
23 | }
24 |
25 | li{
26 | list-style:none;
27 | margin-top:3px;
28 | }
29 |
30 |
31 | .button {
32 | position: relative;
33 | /* overflow: visible; */
34 | display: inline-block;
35 | padding: 0.5em 1em;
36 | *padding-left:0.5em;
37 | *padding-right:0.5em;
38 | padding-bottom: 0.3em\9;
39 | border: 1px solid #d4d4d4;
40 | margin: 0;
41 | text-decoration: none;
42 | text-shadow: 1px 1px 0 #fff;
43 | /*font:11px/normal sans-serif;*/
44 | font-size: 12px;
45 | font-size: 11px\9; /* Only IE */
46 | color: #333;
47 | white-space: nowrap;
48 | cursor: pointer;
49 | outline: none;
50 | background-color: #ececec;
51 | background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#f4f4f4), to(#ececec));
52 | background-image: -moz-linear-gradient(#f4f4f4, #ececec);
53 | background-image: -o-linear-gradient(#f4f4f4, #ececec);
54 | background-image: linear-gradient(#f4f4f4, #ececec);
55 | -webkit-background-clip: padding;
56 | -moz-background-clip: padding;
57 | -o-background-clip: padding-box;
58 | /*background-clip: padding-box;*/ /* commented out due to Opera 11.10 bug */
59 | -webkit-border-radius: 0.2em;
60 | -moz-border-radius: 0.2em;
61 | border-radius: 0.2em;
62 | /* IE hacks */
63 | zoom: 1;
64 | *display: inline;
65 | }
66 |
67 | .button:hover,
68 | .button:focus,
69 | .button:active,
70 | .button.active {
71 | border-color: #3072b3;
72 | border-bottom-color: #2a65a0;
73 | text-decoration: none;
74 | text-shadow: -1px -1px 0 rgba(0,0,0,0.3);
75 | color: #fff !important;
76 | background-color: #3C8DDE;
77 | background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#599bdc), to(#3072b3));
78 | background-image: -moz-linear-gradient(#599bdc, #3072b3);
79 | background-image: -o-linear-gradient(#599bdc, #3072b3);
80 | background-image: linear-gradient(#599bdc, #3072b3);
81 | }
82 |
83 | .button:active,
84 | .button.active {
85 | border-color: #2a65a0;
86 | border-bottom-color: #3884CF;
87 | background-color: #3072b3;
88 | background-image: -webkit-gradient(linear, 0 0, 0 100%, from(#3072b3), to(#599bdc));
89 | background-image: -moz-linear-gradient(#3072b3, #599bdc);
90 | background-image: -o-linear-gradient(#3072b3, #599bdc);
91 | background-image: linear-gradient(#3072b3, #599bdc);
92 | _background:#3884CF !important;
93 | _border:1px solid #2A65A0 !important;
94 | }
95 |
96 | /* overrides extra padding on button elements in Firefox */
97 | .button::-moz-focus-inner {
98 | padding: 0;
99 | border: 0;
100 | }
101 |
102 | a:link {
103 | color: blue;
104 | }
105 |
106 | input[type='text'] {
107 | width:300px
108 | }
109 |
110 | input[type='password'] {
111 | width:300px
112 | }
113 |
114 | input {
115 | width:expression(this.type=="text"?"300px":"style");
116 | }
117 |
--------------------------------------------------------------------------------
/ztq_worker/ztq_worker/main.py:
--------------------------------------------------------------------------------
1 | # -*- encoding: utf-8 -*-
2 |
3 | import sys, os
4 | from command_thread import CommandThread
5 | from config_manager import read_config_file
6 | from command_execute import init_job_threads
7 | from system_info import get_ip
8 |
9 | import ztq_core
10 |
11 | def run():
12 | conf_file = ''
13 | # 用户指定一个配置文件
14 | if len(sys.argv) > 1:
15 | conf_file = sys.argv[1]
16 |
17 | config = read_config_file(conf_file)
18 | main(config)
19 |
20 | def main(config, thread=False):
21 | """ 主函数
22 |
23 | config: {'server': {host:, port:, db:}
24 | }
25 | """
26 |
27 | server = config['server']
28 | # 动态注册task
29 | for module in server['modules'].split():
30 | try:
31 | __import__(module)
32 | except ImportError:
33 | modules = module.split('.')
34 | __import__(modules[0], globals(), locals(), modules[1])
35 |
36 | # 连结服务器
37 | redis_host = server['host']
38 | redis_port = int(server['port'])
39 | redis_db = int(server['db'])
40 | ztq_core.setup_redis('default', host=redis_host, port=redis_port, db=redis_db)
41 |
42 | # 开启一个命令线程
43 | alias = server.get('alias', '')
44 | if not alias:
45 | alias = get_ip()
46 | server['alias'] = alias
47 |
48 | command_thread = CommandThread(worker_name=alias)
49 |
50 | sys.stdout.write('Starting server in PID %s\n'%os.getpid())
51 |
52 | worker_state = ztq_core.get_worker_state()
53 | active_config = server.get('active_config', 'false')
54 |
55 | # 计算那些是需要根据线上配置启动的队列
56 | active_queue_config = {}
57 | if active_config.lower() == 'true' and command_thread.worker_name in worker_state:
58 | # 如果服务器有这个机器的配置信息,需要自动启动工作线程
59 | worker_config = ztq_core.get_worker_config()
60 | active_queue_config = worker_config.get(command_thread.worker_name, {})
61 |
62 | # 根据本地配置,启动的队列
63 | local_queue_config = {}
64 | if config['queues']:
65 | # 把worker监视队列的情况上报到服务器
66 | queue_config = ztq_core.get_queue_config()
67 | # 如果配置有queues,自动启动线程监视
68 | for queue_name, sleeps in config['queues'].items():
69 | # 线上配置稍后再设置
70 | if queue_name in active_queue_config: continue
71 |
72 | local_queue_config[queue_name] = [
73 | {'interval': int(sleep)} for sleep in sleeps.split(',')
74 | ]
75 | if not queue_config.get(queue_name, []):
76 | queue_config[queue_name] = {'name':queue_name, 'title':queue_name, 'widget': 5}
77 |
78 |
79 | # 合并线上和线下的配置
80 | active_queue_config.update(local_queue_config)
81 | init_job_threads(active_queue_config)
82 |
83 | loggers = config['log']
84 | initlog(
85 | loggers.get('key', 'ztq_worker'),
86 | loggers.get('handler_file'),
87 | loggers.get('level', 'ERROR'),
88 | )
89 |
90 |     # run the command loop either as a daemon thread or inline in this process
91 | if thread:
92 | command_thread.setDaemon(True)
93 | command_thread.start()
94 | else:
95 | command_thread.run()
96 |
97 | def initlog(key, handler_file, level):
98 | import logging
99 | level = logging.getLevelName(level)
100 | format = '%(asctime)s %(message)s'
101 | if not handler_file:
102 | logging.basicConfig(level=level, format=format)
103 | else:
104 | logging.basicConfig(
105 | filename=handler_file,
106 | filemode='a',
107 | level=level,
108 | format=format
109 | )
110 |
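# Embedding sketch (illustrative values): the config dict normally comes from
# read_config_file() parsing a worker.ini, but main() only needs the mapping
# below, so a worker can also be started from inside another process:
#
#   config = {
#       'server': {'host': 'localhost', 'port': '6379', 'db': '0',
#                  'alias': 'w01', 'active_config': 'false',
#                  'modules': 'ztq_demo.tasks'},
#       'queues': {'default': '0', 'mail': '0,0'},
#       'log': {'handler_file': './ztq_worker.log', 'level': 'ERROR'},
#   }
#   main(config, thread=True)   # thread=True runs the command loop as a daemon thread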
--------------------------------------------------------------------------------
/ztq_console/ztq_console/templates/queues.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | 转换队列
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
21 |
22 |
23 |
24 | 队列
25 | 线程数
26 | 任务数
27 | 缓存任务数
28 | 任务首个时间
29 | 错误数
30 | 错误最新时间
31 | 错误任务的操作
32 |
33 |
34 |
35 |
36 | {% for task_queue in task_queues %}
37 |
38 | {{ task_queue['name'] }}
39 | {{ task_queue['workers'].__len__() }}
40 | ↑
41 | ↓
42 |
43 |
44 | {% if task_queue['length'] == 0 %}
45 | 0
46 | {% else %}
47 | {{ task_queue['length']}}
48 | {% endif %}
49 |
50 |
51 | {{ task_queue['buffer_length'] }}
52 |
53 | {{ task_queue['first'] }}
54 |
55 | {% if task_queue['error_length'] == 0 %}
56 | 0
57 | {% else %}
58 | {{ task_queue['error_length'] }}
59 |
60 | {% endif %}
61 |
62 | {{ task_queue['error_end'] }}
63 |
64 | 全部重做
66 | 全部删除
68 |
69 |
70 | {% endfor %}
71 |
72 |
73 | 合计
74 |
75 | {{task_job_length}}
76 |
77 |
78 | {{error_job_length}}
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/static/style.css:
--------------------------------------------------------------------------------
1 | /*
2 | KISSY CSS Reset
3 | 理念:清除和重置是紧密不可分的
4 | 特色:1.适应中文 2.基于最新主流浏览器
5 | 维护:玉伯(lifesinger@gmail.com), 正淳(ragecarrier@gmail.com)
6 | */
7 |
8 | /* 清除内外边距 */
9 | body, h1, h2, h3, h4, h5, h6, p, blockquote, /* structural elements 结构元素 */
10 | dl, dt, dd, ul, ol, li, /* list elements 列表元素 */
11 | pre, /* text formatting elements 文本格式元素 */
12 | fieldset, lengend, button, input, textarea, /* form elements 表单元素 */
13 | th, td { /* table elements 表格元素 */
14 | margin: 0;
15 | padding: 0;
16 | }
17 |
18 | /* 设置默认字体 */
19 | body,
20 | button, input, select, textarea, table { /* for ie */
21 | /*font: 12px/1 Tahoma, Helvetica, Arial, "宋体", sans-serif;*/
22 | font: 13px/1 Arial,Verdana,sans-serif; /* 用 ascii 字符表示,使得在任何编码下都无问题 */
23 | }
24 | button, input, select, textarea { padding:3px; line-height:1em; margin:0;}
25 | input { line-height:normal; }
26 | h1 { font-size:24px; }
27 | h2 { font-size:22px; }
28 | h3 { font-size:20px; }
29 | h4 { font-size:18px; }
30 | h5 { font-size:15px; }
31 | h1, h2, h3, h4, h5 { font-weight:normal; }
32 |
33 | address, cite, dfn, var { font-style: normal; } /* 将斜体扶正 */
34 | small { font-size: 11px; } /* 小于 12px 的中文很难阅读,让 small 正常化 */
35 |
36 | /* 重置文本格式元素 */
37 |
38 | abbr[title], acronym[title] { /* 注:1.ie6 不支持 abbr; 2.这里用了属性选择符,ie6 下无效果 */
39 | border-bottom: 1px dotted;
40 | cursor: help;
41 | }
42 |
43 | q:before, q:after { content: ''; }
44 |
45 | /* 重置表单元素 */
46 | legend { color: #000; } /* for ie6 */
47 | fieldset, img { border: none; } /* img 搭车:让链接里的 img 无边框 */
48 | /* 注:optgroup 无法扶正 */
49 | button, input, select, textarea {
50 | font-size: 100%; /* 使得表单元素在 ie 下能继承字体大小 */
51 | }
52 |
53 | /* 重置表格元素 */
54 | table {
55 | border-collapse: collapse;
56 | border-spacing: 0;
57 | }
58 |
59 | /* 让非ie浏览器默认也显示垂直滚动条,防止因滚动条引起的闪烁 */
60 | /*html { overflow-y: scroll; }*/
61 |
62 | /* reset end */
63 |
64 |
65 |
66 |
67 |
68 | /*--------------------------------------------------------*/
69 |
70 | a { color:#3366CC; }
71 | a:hover { color:#BD0A01; }
72 |
73 | body { }
74 | pre { font-family:'Courier New',monospace; }
75 | .no-wrap { white-space:nowrap; }
76 | .bt { background:#EEE; color:#333; border:1px solid #CCC; border-radius:5px; padding:3px 5px; }
77 | a.bt { text-decoration:none; }
78 | a.bt:hover { background:#CCC; border:1px solid #999; }
79 | .red { color:red; }
80 | .selected a { color:red !important; }
81 | .clear { clear:both; }
82 |
83 | /* begin header*/
84 | #header{ border-bottom:5px solid #4396C3; padding:5px; background:url(images/header-bg.jpg) repeat-x; }
85 | #header h1 { font-size:42px; line-height:60px; padding:0 0 0 15px; color:#666; float:left; }
86 | .powered { float:right; color:#666; padding-top:40px; padding-right:10px}
87 |
88 | /* begin menu*/
89 | .menu{ width:800px;margin:20px;font-size:15px; }
90 |
91 | #menu { padding:5px 10px; border-radius:3px; background:#EDF5FA; margin:0 10px; }
92 | #menu p { padding:10px 5px; font-size:14px; font-weight:700; border-bottom:1px solid #CCC;}
93 | #menu p a { text-decoration:none; color:#000; }
94 | #menu p a:hover { color:red; }
95 |
96 | /* content page title */
97 | #discribe{ margin-bottom:20px; font-size:24px; }
98 |
99 | #status-section{ }
100 | #status-section table { border-spacing: 0; width: 100%; table-layout: fixed; }
101 | #status-section table td { word-wrap:break-word; }
102 | #status-section th { border: 1px solid #CCC; padding:10px; text-align:left;background:#F5F5F5; font-size:13px; white-space:nowrap; }
103 | #status-section td { border-bottom: 1px solid #CCC; padding:10px; vertical-align:top; }
104 |
105 | #content{ position:absolute; width:80%; height:80%; left: 250px; top: 80px; }
106 |
107 | #iframe1{ width:100%; height:100%; }
108 |
109 | #sibebar{ position:absolute; top:80px; left:-15px; font-size:15px; }
110 | #sibebar ul li { float: inherit; border-bottom:5px; list-style: none; }
111 | #sibebar ul li a:hover { top: 10px; color: #a9a779; text-decoration:underline; }
112 |
113 | .show_code { text-align: left; }
114 |
--------------------------------------------------------------------------------
/ztq_core/test/test_redis_wrap.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 | '''
3 | 测试说明:
4 | 此测试是针对redis_wrap库进行自动json编码的测试
5 |
6 | 测试结果:
7 | Ran 5 tests in 0.036s
8 |
9 | FAILED (failures=1)
10 |
11 | 失败原因是在对list进行remove(value)操作的时候,redis的lrem无法删除序列化后的对象,
12 | set类型能正常remove序列化后的对象.
13 |
14 | @author: Zay
15 | '''
16 | import unittest
17 | from ztq_core import get_redis, get_list, get_hash, get_set, get_dict, setup_redis, \
18 | get_key, set_key, get_queue
19 |
20 | class TestRediswrap(unittest.TestCase):
21 | def setUp(self):
22 | """初始化连接redis,和初始化变量
23 | """
24 | setup_redis('default', '192.168.209.128', 6379, socket_timeout=2)
25 | get_redis(system='default').delete('list')
26 | get_redis(system='default').delete('set')
27 | get_redis(system='default').delete('hash')
28 | get_redis(system='default').delete('dict')
29 | get_redis(system='default').delete('kv')
30 | get_redis(system='default').delete('queue')
31 | self.message = {"hello":"grizzly"}
32 |
33 | def test_getset(self):
34 | """进行基本的redis 的key进行get和set的操作.
35 | """
36 | Test_key = get_key('kv',serialized_type='json')
37 | self.assertEqual(Test_key,None)
38 |
39 | set_key('kv',self.message)
40 |
41 | Test_key = get_key('kv',serialized_type='json')
42 | self.assertEqual(Test_key,self.message)
43 |
44 | def test_dict(self):
45 | """测试redis_wrap的dict类型的操作
46 | """
47 | Test_dict = get_dict('dict',serialized_type='json')
48 |
49 | Test_dict['id'] = self.message
50 | self.assertEqual(self.message, Test_dict['id'])
51 |
52 | for k,v in Test_dict.items():
53 | self.assertEqual(k, 'id')
54 | self.assertEqual(v, self.message)
55 |
56 | del Test_dict['id']
57 | self.assertNotEqual(self.message,Test_dict.get('id'))
58 |
59 | def test_hash(self):
60 | """测试redis_wrap的 hash类型的操作
61 | """
62 | Test_dict = get_hash('hash',serialized_type='json')
63 |
64 | Test_dict['id'] = self.message
65 | self.assertEqual(self.message, Test_dict['id'])
66 |
67 | del Test_dict['id']
68 | self.assertNotEqual(self.message,Test_dict.get('id'))
69 |
70 | def test_list(self):
71 | """进行redis_wrap的list的基本操作
72 | """
73 | Test_list = get_list('list',serialized_type='json')
74 |
75 | Test_list.append(self.message)
76 | self.assertEqual( len(Test_list),1)
77 |
78 | for item in Test_list:
79 | self.assertEqual(self.message, item)
80 |
81 |         # This step fails because redis's LREM cannot remove the serialized value
82 | Test_list.remove(self.message)
83 | self.assertEqual( len(Test_list),0)
84 |
85 | def test_set(self):
86 | """进行对redis_wrap的set类型的基本操作
87 | """
88 | Test_set = get_set('set',serialized_type='json')
89 | Test_set.add(self.message)
90 |
91 | for item in Test_set:
92 | self.assertEqual( item,self.message)
93 |
94 | Test_set.remove(self.message)
95 | self.assertEqual( len(Test_set),0)
96 |
97 | def test_queue(self):
98 | """进行redis_wrap的queue的基本操作
99 | """
100 | Test_queue = get_queue('queue',serialized_type='json')
101 |
102 | Test_queue.push(self.message)
103 | self.assertEqual( len(Test_queue),1)
104 |
105 | for item in Test_queue:
106 | self.assertEqual(self.message, item)
107 |
108 |         # This step fails because redis's LREM cannot remove the serialized value
109 | Test_queue.remove(self.message)
110 | self.assertEqual( len(Test_queue),0)
111 | #===========================================================================
112 | #
113 | # message = Test_queue.pop(timeout= 1)
114 | # self.assertEqual(self.message, message)
115 | # self.assertEqual(len(Test_queue),0)
116 | #===========================================================================
117 |
118 | if __name__ == '__main__':
119 | unittest.main()
120 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/templates/errorlog.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 | 最近{{sindex}} ~ {{eindex}}条错误清单如下:
13 |
14 |
15 |
16 |
17 |
18 | 所属队列
19 | 工作端
20 | 开始时间
21 | 结束时间
22 | 出错类型
23 | 操作
24 |
25 |
26 |
27 | 如下工作发生异常,您可以删除或者重新执行:
28 |
29 |
30 |
31 | {% if error_jobs %}
32 | {% for queue in error_jobs %}
33 | {% for job in queue %}
34 |
35 |
36 | {{ job['_queue_name'] }}
37 |
38 | {{ job['_server'] }}
39 | {{ job['_start'] }}
40 | {{ job['_end'] }}
41 | {% if job['_error_mime'] %}{{ job['_error_mime'] }}{% else %}未知{% endif %}
42 |
43 |
44 | {{ job['_detail'] }} 详细信息
45 | 重做
46 | 删除
47 |
48 |
49 |
50 |
51 | {% if job['_file'] %}
52 | 出错文件:{{ job['_file'] }}
53 | {% endif %}
54 | {{ job['_reason'] }}
55 |
56 |
57 | {% endfor %}
58 | {% endfor %}
59 | {% endif %}
60 |
61 |
62 |
63 |
64 | {% if fpage %}
65 | 上一页
66 | {% else %}
67 | 上一页
68 | {% endif %}
69 |
70 | < {{npage-1}} >
71 |
72 | 下一页
73 |
74 |
75 |
76 |
77 |
109 |
110 |
111 |
112 |
--------------------------------------------------------------------------------
/ztq_core/README.txt:
--------------------------------------------------------------------------------
1 |
2 | ZTQ:Zopen Task Queue
3 | ===========================================
4 |
5 | 简介
6 | --------------------
7 |
8 | ZTQ 队列服务, 分为3个包:ztq_core, ztq_worker, ztq_console。默认使用redis作为队列的后端。
9 |
10 | ztq_core ::
11 |
12 | 提供一系列的方法把任务push到队列中,由ztq_worker去获取队列任务并且执行。
13 |
14 | 你可以在这里找到它: http://pypi.python.org/pypi/ztq_core/
15 |
16 | ztq_worker::
17 |
18 | 队列的接收端,以线程为单位阻塞式的去监视一个队列。每一个线程称为Worker
19 | 当有任务push到了队列中,相应的Worker会自动pull下来去执行。
20 |
21 | 你可以在这里找到它: http://pypi.python.org/pypi/ztq_worker/
22 |
23 | ztq_console::
24 |
25 | 对每一个队列的每一个任务执行情况进行监控、下达指令。这个包是可选的
26 |
27 | 你可以在这里找到它: http://pypi.python.org/pypi/ztq_console/
28 |
29 | 关于 ZTQ
30 | --------------------
31 | ::
32 |
33 | * 开源, 使用MIT 许可
34 | * 基于Python, 容易使用和修改
35 | * 支持linux 和 windows
36 | * 可靠,可以应付突然断电等情况
37 | * 可管理,自身带有ztq_console 监控后台
38 | * 灵活,可以在不同的机器上运行多个Worker, 并且随时热插拔Worker
39 | * 使用简单
40 |
41 | 安装
42 | --------------------
43 | ::
44 |
45 | pip install ztq_core
46 | pip install ztq_worker
47 | pip install ztq_console
48 |
49 | 使用
50 | -------------------
51 |
52 | #. 先定义一个普通的任务 ::
53 |
54 | # my_send.py
55 | import time
56 |
57 | def send(body):
58 | print 'START: ', body
59 | time.sleep(5)
60 | print 'END:', body
61 |
62 | def send2(body):
63 | print 'START2', body
64 | raise Exception('connection error')
65 |
66 |
67 | #. 将普通的任务改成队列任务 ::
68 |
69 | # my_send.py
70 |
71 | import time
72 | from ztq_core import async
73 |
74 | @async # 使用默认队列default
75 | def send(body):
76 | print 'START: ', body
77 | time.sleep(5)
78 | print 'END:', body
79 |
80 | @async(queue='mail') # 使用队列mail
81 | def send2(body):
82 | print 'START2', body
83 | raise Exception('connection error')
84 |
85 |
86 | #. 运行worker ::
87 |
88 | # 运行:bin/ztq_worker app.ini
89 |
90 | # app.ini 例子, 在ztq_worker 包里面有个config 目录放有app.ini 这个文件
91 |
92 | [server]
93 | host = localhost
94 | port = 6379
95 | db = 0
96 | alias = w01
97 | active_config = false
98 | modules = my_send # 所有需要import的任务模块,每个一行
99 |
100 | [queues]
101 | default= 0 # default队列,起1个处理线程
102 | mail = 0, 0 # mail队列,起2个处理线程
103 |
104 | [log]
105 | handler_file = ./ztq_worker.log
106 | level = ERROR
107 |
108 | #. 运行 ::
109 |
110 | import ztq_core
111 | from my_send import send
112 |
113 | # 设置 Redis 连接
114 | ztq_core.setup_redis('default', 'localhost', 6379, 0)
115 |
116 | send('hello, world')
117 |
118 | # 动态指定queue
119 | send('hello world from mail', ztq_queue='mail')
120 |
121 | #. 更详细的测试例子可见ztq_core包下的demo.py
122 |
123 | 使用更高级的特征
124 | --------------------------
125 |
126 | #. 抢占式执行 ::
127 |
128 | # 后插入先执行。如果任务已经在队列,会优先
129 | send (body, ztq_first=True)
130 |
131 | #. 探测任务状态 ::
132 |
133 | # ztq_first存在就优先, ztq_run不存在就运行
134 | # 返回的是"running" 代表正在运行, 是"queue" 代表正在排队
135 | # 如果是"error" 代表出错, 是"none" 代表这个任务不在排队,也没在执行
136 | ping_task(send, body, ztq_first=True, ztq_run=True)
137 |
138 | #. 支持事务 ::
139 |
140 | import transaction
141 | ztq_core.enable_transaction(True)
142 | send_mail(from1, to1, body1)
143 | send_mail(from2, to2, body2)
144 | transaction.commit()
145 | # 也可以单独关闭事务
146 | send_mail(from2, to2, body2, ztq_transaction=False)
147 |
148 | #. 定时任务 ::
149 |
150 | from ztq_core.async import async
151 | from ztq_core import redis_wrap
152 | from ztq_core.cron import has_cron, add_cron
153 |
154 | @async(queue='clock-0')
155 | def bgrewriteaof():
156 | """ 将redis的AOF文件压缩 """
157 | redis = redis_wrap.get_redis()
158 | redis.bgrewriteaof()
159 |
160 |
161 | # 如果队列上没有这个定时任务,就加上。自动定时压缩redis
162 | if not has_cron(bgrewriteaof):
163 | add_cron({'hour':1}, bgrewriteaof)
164 |
165 | #. 任务串行 ::
166 |
167 | from ztq_core import prepare_task
168 | # 根据(方法,参数)生成一个任务
169 | callback = prepare_task(send, body)
170 | # 执行完 send_mail 之后队列会自动将callback 放入指定的队列
171 | send_mail(body, ztq_callback=callback)
172 |
173 | #. 异常处理 ::
174 |
175 | from ztq_core import prepare_task
176 |
177 | @async(queue='mail')
178 | def fail_callback(return_code, return_msg):
179 | print return_code, return_msg
180 |
181 | fcallback = prepare_task(send2)
182 |
183 | # 如果任务 send 抛出了任何异常,都会将fcallback 放入指定队列
184 | send(body, ztq_fcallback=fcallback)
185 |
186 | #. 进度回调 ::
187 |
188 | import ztq_worker
189 | @async(queue='doc2pdf')
190 | def doc2pdf(filename):
191 | ...
192 | # 可被进度回调函数调用
193 | ztq_worker.report_progress(page=2)
194 | ...
195 |
196 | from ztq_core import prepare_task
197 | pcallback = prepare_task(send2, body)
198 | doc2pdf(filename, ztq_pcallback=pcallback)
199 |
200 | #. 批处理 ::
201 |
202 | # 为提升性能,需要多个xapian索引操作,一次性提交数据库
203 | @async(queue='xapian')
204 | def index(data):
205 | pass
206 |
207 | def do_commit():
208 | xapian_conn.commit()
209 |
210 | # 每执行20个索引任务之后,一次性提交数据库
211 | # 不够20个,但队列空的时候,也会提交
212 | register_batch_queue('xapian', 20, batch_func=do_commit)
213 |
214 |
--------------------------------------------------------------------------------
/ztq_console/README.txt:
--------------------------------------------------------------------------------
1 |
2 | ZTQ:Zopen Task Queue
3 | ===========================================
4 |
5 | 简介
6 | --------------------
7 |
8 | ZTQ 队列服务, 分为3个包:ztq_core, ztq_worker, ztq_console。默认使用redis作为队列的后端。
9 |
10 | ztq_core ::
11 |
12 | 提供一系列的方法把任务push到队列中,由ztq_worker去获取队列任务并且执行。
13 |
14 | 你可以在这里找到它: http://pypi.python.org/pypi/ztq_core/
15 |
16 | ztq_worker::
17 |
18 | 队列的接收端,以线程为单位阻塞式的去监视一个队列。每一个线程称为Worker
19 | 当有任务push到了队列中,相应的Worker会自动pull下来去执行。
20 |
21 | 你可以在这里找到它: http://pypi.python.org/pypi/ztq_worker/
22 |
23 | ztq_console::
24 |
25 | 对每一个队列的每一个任务执行情况进行监控、下达指令。这个包是可选的
26 |
27 | 你可以在这里找到它: http://pypi.python.org/pypi/ztq_console/
28 |
29 | 关于 ZTQ
30 | --------------------
31 | ::
32 |
33 | * 开源, 使用MIT 许可
34 | * 基于Python, 容易使用和修改
35 | * 支持linux 和 windows
36 | * 可靠,可以应付突然断电等情况
37 | * 可管理,自身带有ztq_console 监控后台
38 | * 灵活,可以在不同的机器上运行多个Worker, 并且随时热插拔Worker
39 | * 使用简单
40 |
41 | 安装
42 | --------------------
43 | ::
44 |
45 | pip install ztq_core
46 | pip install ztq_worker
47 | pip install ztq_console
48 |
49 | 使用
50 | -------------------
51 |
52 | #. 先定义一个普通的任务 ::
53 |
54 | # my_send.py
55 | import time
56 |
57 | def send(body):
58 | print 'START: ', body
59 | time.sleep(5)
60 | print 'END:', body
61 |
62 | def send2(body):
63 | print 'START2', body
64 | raise Exception('connection error')
65 |
66 |
67 | #. 将普通的任务改成队列任务 ::
68 |
69 | # my_send.py
70 |
71 | import time
72 | from ztq_core import async
73 |
74 | @async # 使用默认队列default
75 | def send(body):
76 | print 'START: ', body
77 | time.sleep(5)
78 | print 'END:', body
79 |
80 | @async(queue='mail') # 使用队列mail
81 | def send2(body):
82 | print 'START2', body
83 | raise Exception('connection error')
84 |
85 |
86 | #. 运行worker ::
87 |
88 | # 运行:bin/ztq_worker app.ini
89 |
90 | # app.ini 例子, 在ztq_worker 包里面有个config 目录放有app.ini 这个文件
91 |
92 | [server]
93 | host = localhost
94 | port = 6379
95 | db = 0
96 | alias = w01
97 | active_config = false
98 | modules = my_send # 所有需要import的任务模块,每个一行
99 |
100 | [queues]
101 | default= 0 # default队列,起1个处理线程
102 | mail = 0, 0 # mail队列,起2个处理线程
103 |
104 | [log]
105 | handler_file = ./ztq_worker.log
106 | level = ERROR
107 |
108 | #. 运行 ::
109 |
110 | import ztq_core
111 | from my_send import send
112 |
113 | # 设置 Redis 连接
114 | ztq_core.setup_redis('default', 'localhost', 6379, 0)
115 |
116 | send('hello, world')
117 |
118 | # 动态指定queue
119 | send('hello world from mail', ztq_queue='mail')
120 |
121 | #. 更详细的测试例子可见ztq_core包下的demo.py
122 |
123 | 使用更高级的特征
124 | --------------------------
125 |
126 | #. 抢占式执行 ::
127 |
128 | # 后插入先执行。如果任务已经在队列,会优先
129 | send (body, ztq_first=True)
130 |
131 | #. 探测任务状态 ::
132 |
133 | # ztq_first存在就优先, ztq_run不存在就运行
134 | # 返回的是"running" 代表正在运行, 是"queue" 代表正在排队
135 | # 如果是"error" 代表出错, 是"none" 代表这个任务不在排队,也没在执行
136 | ping_task(send, body, ztq_first=True, ztq_run=True)
137 |
138 | #. 支持事务 ::
139 |
140 | import transaction
141 | ztq_core.enable_transaction(True)
142 | send_mail(from1, to1, body1)
143 | send_mail(from2, to2, body2)
144 | transaction.commit()
145 | # 也可以单独关闭事务
146 | send_mail(from2, to2, body2, ztq_transaction=False)
147 |
148 | #. 定时任务 ::
149 |
150 | from ztq_core.async import async
151 | from ztq_core import redis_wrap
152 | from ztq_core.cron import has_cron, add_cron
153 |
154 | @async(queue='clock-0')
155 | def bgrewriteaof():
156 | """ 将redis的AOF文件压缩 """
157 | redis = redis_wrap.get_redis()
158 | redis.bgrewriteaof()
159 |
160 |
161 | # 如果队列上没有这个定时任务,就加上。自动定时压缩redis
162 | if not has_cron(bgrewriteaof):
163 | add_cron({'hour':1}, bgrewriteaof)
164 |
165 | #. 任务串行 ::
166 |
167 | from ztq_core import prepare_task
168 | # 根据(方法,参数)生成一个任务
169 | callback = prepare_task(send, body)
170 | # 执行完 send_mail 之后队列会自动将callback 放入指定的队列
171 | send_mail(body, ztq_callback=callback)
172 |
173 | #. 异常处理 ::
174 |
175 | from ztq_core import prepare_task
176 |
177 | @async(queue='mail')
178 | def fail_callback(return_code, return_msg):
179 | print return_code, return_msg
180 |
181 | fcallback = prepare_task(send2)
182 |
183 | # 如果任务 send 抛出了任何异常,都会将fcallback 放入指定队列
184 | send(body, ztq_fcallback=fcallback)
185 |
186 | #. 进度回调 ::
187 |
188 | import ztq_worker
189 | @async(queue='doc2pdf')
190 | def doc2pdf(filename):
191 | ...
192 | # 可被进度回调函数调用
193 | ztq_worker.report_progress(page=2)
194 | ...
195 |
196 | from ztq_core import prepare_task
197 | pcallback = prepare_task(send2, body)
198 | doc2pdf(filename, ztq_pcallback=pcallback)
199 |
200 | #. 批处理 ::
201 |
202 | # 为提升性能,需要多个xapian索引操作,一次性提交数据库
203 | @async(queue='xapian')
204 | def index(data):
205 | pass
206 |
207 | def do_commit():
208 | xapian_conn.commit()
209 |
210 | # 每执行20个索引任务之后,一次性提交数据库
211 | # 不够20个,但队列空的时候,也会提交
212 | register_batch_queue('xapian', 20, batch_func=do_commit)
213 |
214 |
--------------------------------------------------------------------------------
/ztq_worker/README.txt:
--------------------------------------------------------------------------------
1 |
2 | ZTQ:Zopen Task Queue
3 | ===========================================
4 |
5 | 简介
6 | --------------------
7 |
8 | ZTQ 队列服务, 分为3个包:ztq_core, ztq_worker, ztq_console。默认使用redis作为队列的后端。
9 |
10 | ztq_core ::
11 |
12 | 提供一系列的方法把任务push到队列中,由ztq_worker去获取队列任务并且执行。
13 |
14 | 你可以在这里找到它: http://pypi.python.org/pypi/ztq_core/
15 |
16 | ztq_worker::
17 |
18 | 队列的接收端,以线程为单位阻塞式的去监视一个队列。每一个线程称为Worker
19 | 当有任务push到了队列中,相应的Worker会自动pull下来去执行。
20 |
21 | 你可以在这里找到它: http://pypi.python.org/pypi/ztq_worker/
22 |
23 | ztq_console::
24 |
25 | 对每一个队列的每一个任务执行情况进行监控、下达指令。这个包是可选的
26 |
27 | 你可以在这里找到它: http://pypi.python.org/pypi/ztq_console/
28 |
29 | 关于 ZTQ
30 | --------------------
31 | ::
32 |
33 | * 开源, 使用MIT 许可
34 | * 基于Python, 容易使用和修改
35 | * 支持linux 和 windows
36 | * 可靠,可以应付突然断电等情况
37 | * 可管理,自身带有ztq_console 监控后台
38 | * 灵活,可以在不同的机器上运行多个Worker, 并且随时热插拔Worker
39 | * 使用简单
40 |
41 | 安装
42 | --------------------
43 | ::
44 |
45 | pip install ztq_core
46 | pip install ztq_worker
47 | pip install ztq_console
48 |
49 | 使用
50 | -------------------
51 |
52 | #. 先定义一个普通的任务 ::
53 |
54 | # my_send.py
55 | import time
56 |
57 | def send(body):
58 | print 'START: ', body
59 | time.sleep(5)
60 | print 'END:', body
61 |
62 | def send2(body):
63 | print 'START2', body
64 | raise Exception('connection error')
65 |
66 |
67 | #. 将普通的任务改成队列任务 ::
68 |
69 | # my_send.py
70 |
71 | import time
72 | from ztq_core import async
73 |
74 | @async # 使用默认队列default
75 | def send(body):
76 | print 'START: ', body
77 | time.sleep(5)
78 | print 'END:', body
79 |
80 | @async(queue='mail') # 使用队列mail
81 | def send2(body):
82 | print 'START2', body
83 | raise Exception('connection error')
84 |
85 |
86 | #. 运行worker ::
87 |
88 | # 运行:bin/ztq_worker app.ini
89 |
90 | # app.ini 例子, 在ztq_worker 包里面有个config 目录放有app.ini 这个文件
91 |
92 | [server]
93 | host = localhost
94 | port = 6379
95 | db = 0
96 | alias = w01
97 | active_config = false
98 | modules = my_send # 所有需要import的任务模块,每个一行
99 |
100 | [queues]
101 | default= 0 # default队列,起1个处理线程
102 | mail = 0, 0 # mail队列,起2个处理线程
103 |
104 | [log]
105 | handler_file = ./ztq_worker.log
106 | level = ERROR
107 |
108 | #. 运行 ::
109 |
110 | import ztq_core
111 | from my_send import send
112 |
113 | # 设置 Redis 连接
114 | ztq_core.setup_redis('default', 'localhost', 6379, 0)
115 |
116 | send('hello, world')
117 |
118 | # 动态指定queue
119 | send('hello world from mail', ztq_queue='mail')
120 |
121 | #. 更详细的测试例子可见ztq_core包下的demo.py
122 |
123 | 使用更高级的特征
124 | --------------------------
125 |
126 | #. 抢占式执行 ::
127 |
128 | # 后插入先执行。如果任务已经在队列,会优先
129 | send (body, ztq_first=True)
130 |
131 | #. 探测任务状态 ::
132 |
133 | # ztq_first存在就优先, ztq_run不存在就运行
134 | # 返回的是"running" 代表正在运行, 是"queue" 代表正在排队
135 | # 如果是"error" 代表出错, 是"none" 代表这个任务不在排队,也没在执行
136 | ping_task(send, body, ztq_first=True, ztq_run=True)
137 |
138 | #. 支持事务 ::
139 |
140 | import transaction
141 | ztq_core.enable_transaction(True)
142 | send_mail(from1, to1, body1)
143 | send_mail(from2, to2, body2)
144 | transaction.commit()
145 | # 也可以单独关闭事务
146 | send_mail(from2, to2, body2, ztq_transaction=False)
147 |
148 | #. 定时任务 ::
149 |
150 | from ztq_core.async import async
151 | from ztq_core import redis_wrap
152 | from ztq_core.cron import has_cron, add_cron
153 |
154 | @async(queue='clock-0')
155 | def bgrewriteaof():
156 | """ 将redis的AOF文件压缩 """
157 | redis = redis_wrap.get_redis()
158 | redis.bgrewriteaof()
159 |
160 |
161 | # 如果队列上没有这个定时任务,就加上。自动定时压缩redis
162 | if not has_cron(bgrewriteaof):
163 | add_cron({'hour':1}, bgrewriteaof)
164 |
165 | #. 任务串行 ::
166 |
167 | from ztq_core import prepare_task
168 | # 根据(方法,参数)生成一个任务
169 | callback = prepare_task(send, body)
170 | # 执行完 send_mail 之后队列会自动将callback 放入指定的队列
171 | send_mail(body, ztq_callback=callback)
172 |
173 | #. 异常处理 ::
174 |
175 | from ztq_core import prepare_task
176 |
177 | @async(queue='mail')
178 | def fail_callback(return_code, return_msg):
179 | print return_code, return_msg
180 |
181 | fcallback = prepare_task(send2)
182 |
183 | # 如果任务 send 抛出了任何异常,都会将fcallback 放入指定队列
184 | send(body, ztq_fcallback=fcallback)
185 |
186 | #. 进度回调 ::
187 |
188 | import ztq_worker
189 | @async(queue='doc2pdf')
190 | def doc2pdf(filename):
191 | ...
192 | # 可被进度回调函数调用
193 | ztq_worker.report_progress(page=2)
194 | ...
195 |
196 | from ztq_core import prepare_task
197 | pcallback = prepare_task(send2, body)
198 | doc2pdf(filename, ztq_pcallback=pcallback)
199 |
200 | #. 批处理 ::
201 |
202 | # 为提升性能,需要多个xapian索引操作,一次性提交数据库
203 | @async(queue='xapian')
204 | def index(data):
205 | pass
206 |
207 | def do_commit():
208 | xapian_conn.commit()
209 |
210 | # 每执行20个索引任务之后,一次性提交数据库
211 | # 不够20个,但队列空的时候,也会提交
212 | register_batch_queue('xapian', 20, batch_func=do_commit)
213 |
214 |
--------------------------------------------------------------------------------
/ztq_core/ztq_core/async.py:
--------------------------------------------------------------------------------
1 | # -*- encoding:utf-8 -*-
2 |
3 | import types
4 | from task import register, push_task, has_task, gen_task, push_buffer_task
5 | import transaction
6 |
7 | use_transaction = False
8 |
9 | def _setup_callback(kw):
10 | callback = kw.pop('ztq_callback', None)
11 | if callback is not None:
12 | callback_func, callback_args, callback_kw = callback
13 | callback_queue = callback_kw.pop('ztq_queue', callback_func._ztq_queue)
14 | kw.update({'ztq_callback':"%s:%s" % (callback_queue, callback_func.__raw__.__name__),
15 | 'ztq_callback_args':callback_args,
16 | 'ztq_callback_kw':callback_kw})
17 | fcallback = kw.pop('ztq_fcallback', None)
18 | if fcallback is not None:
19 | callback_func, callback_args, callback_kw = fcallback
20 | callback_queue = callback_kw.pop('ztq_queue', callback_func._ztq_queue)
21 | kw.update({'ztq_fcallback':"%s:%s" % (callback_queue, callback_func.__raw__.__name__),
22 | 'ztq_fcallback_args':callback_args,
23 | 'ztq_fcallback_kw':callback_kw})
24 | pcallback = kw.pop('ztq_pcallback', None)
25 | if pcallback is not None:
26 | callback_func, callback_args, callback_kw = pcallback
27 | callback_queue = callback_kw.pop('ztq_queue', callback_func._ztq_queue)
28 | kw.update({'ztq_pcallback':"%s:%s" % (callback_queue, callback_func.__raw__.__name__),
29 | 'ztq_pcallback_args':callback_args,
30 | 'ztq_pcallback_kw':callback_kw})
31 |
32 | def push_task_to_queue(task_name, args, kw, on_commit=False, buffer=False):
33 | if on_commit:
34 | if buffer:
35 | add_after_commit_hook(push_buffer_task, (task_name,) + args, kw)
36 | else:
37 | add_after_commit_hook(push_task, (task_name,) + args, kw)
38 | else:
39 | if buffer:
40 | push_buffer_task(task_name, *args, **kw)
41 | else:
42 | push_task(task_name, *args, **kw)
43 |
44 | def async(*_args, **_kw):
45 | """ 这是一个decorator,事务提交的时候,提交到job队列,异步执行
46 |
47 | 定义job
48 | =============
49 | 第一种::
50 |
51 | @async
52 | def say_hello(name):
53 | print 'hello, ', name
54 |
55 | 第二种, 预先指定队列执行信息::
56 |
57 | @async(queue='hello_queue', transaction=True)
58 | def say_hello(name):
59 | print 'hello, ', name
60 |
61 | 使用方法
62 | ================
63 | 支持如下几种::
64 |
65 | say_hello('asdfa')
66 | say_hello('asdfa', ztq_queue="asdfa", ztq_transaction=False)
67 |
68 | """
69 | if len(_args) == 1 and not _kw and isinstance(_args[0], types.FunctionType): # 不带参数的形式
70 | func = _args[0]
71 | def new_func1(*args, **kw):
72 | queue_name = kw.pop('ztq_queue', 'default')
73 | buffer = kw.pop('ztq_buffer', False)
74 | on_commit= kw.pop('ztq_transaction', use_transaction)
75 | task_name = "%s:%s" % (queue_name, func.__name__)
76 | _setup_callback(kw)
77 | push_task_to_queue(task_name, args, kw, on_commit=on_commit, buffer=buffer)
78 |
79 | new_func1.__raw__ = func
80 | new_func1._ztq_queue = 'default'
81 | register(func)
82 | return new_func1
83 | else:
84 | _queue_name = _kw.get('queue', 'default')
85 | def _async(func):
86 | def new_func(*args, **kw):
87 | #on_commit= kw.pop('ztq_transaction', _on_commit)
88 | on_commit= kw.pop('ztq_transaction', use_transaction)
89 | queue_name = kw.pop('ztq_queue', _queue_name)
90 | buffer = kw.pop('ztq_buffer', False)
91 | task_name = "%s:%s" % (queue_name, func.__name__)
92 | _setup_callback(kw)
93 | push_task_to_queue(task_name, args, kw, on_commit=on_commit, buffer=buffer)
94 |
95 | new_func.__raw__ = func
96 | new_func._ztq_queue = _queue_name
97 | register(func)
98 | return new_func
99 | return _async
100 |
101 | def prepare_task(func, *args, **kw):
102 | _setup_callback(kw)
103 | return func, args, kw
104 |
105 | def ping_task(func, *args, **kw):
106 | queue_name = kw.pop('ztq_queue', func._ztq_queue)
107 | to_front = kw.pop('ztq_first', False)
108 | on_commit = kw.pop('ztq_transaction', None)
109 | run = kw.pop('ztq_run', False)
110 | task = gen_task(func.__raw__.__name__, *args, **kw)
111 | result = has_task(queue_name, task, to_front=to_front)
112 | if result == 'none' and run:
113 | kw['ztq_queue'] = queue_name
114 | kw['ztq_first'] = to_front
115 | if on_commit is not None:
116 | kw['ztq_transaction'] = on_commit
117 | func(*args, **kw)
118 | return result
119 |
120 | #### 以下代码让队列的任务支持事务
121 | def enable_transaction(enable):
122 | """ 是否支持transaction, 默认不支持 """
123 | global use_transaction
124 | use_transaction = bool(enable)
125 |
126 | def _run_after_commit(success_commit, func, args, kw):
127 | if success_commit:
128 | func(*args, **kw)
129 |
130 | def add_after_commit_hook(func, args, kw):
131 | """ 在事务最后添加一个钩子,让队列任务在事务完成后才做实际的操作
132 | """
133 | if not use_transaction: return
134 | transaction.get().addAfterCommitHook(
135 | _run_after_commit,
136 | (func, args, kw),
137 | )
138 |
--------------------------------------------------------------------------------
/ztq_worker/ztq_worker/command_execute.py:
--------------------------------------------------------------------------------
1 | # -*- encoding: utf-8 -*-
2 |
3 | #from zopen.transform import set_drive_config
4 | from config_manager import CONFIG
5 | from job_thread_manager import JobThreadManager
6 | from buffer_thread import BufferThread
7 | from system_info import get_cpu_style, get_cpu_usage, get_mem_usage
8 | import os
9 | import sys
10 | import traceback
11 | import time
12 | import ztq_core
13 |
14 | # 管理工作线程, 添加线程、删除线程、保存信息
15 | job_thread_manager = JobThreadManager()
16 |
17 | # buffer 线程
18 | buffer_thread_instance = None
19 |
20 | def set_job_threads(config_dict):
21 | """ 根据配置信息和job_thread_manager.threads 的数量差来退出/增加线程
22 | 剩下的修改queue_name, interval
23 | """
24 | tmp_jobs = job_thread_manager.threads.copy()
25 | config = []
26 | # 将config_dict转换格式为dicter = [{'queue':'q01', 'interval':6, }, ]
27 | for queue_name, values in config_dict.items():
28 | for value in values:
29 | dicter = dict( queue=queue_name )
30 | dicter.update(value)
31 | config.append(dicter)
32 |
33 | diff_job = len(config) - len(tmp_jobs)
34 | if diff_job > 0: # 需要增加线程
35 | for i in xrange(diff_job):
36 | conf = config.pop()
37 | job_thread_manager.add(conf['queue'], conf['interval'], conf.get('from_right', True))
38 |
39 | elif diff_job < 0: # 需要退出线程
40 | for key in tmp_jobs.keys():
41 | tmp_jobs.pop(key)
42 | job_thread_manager.stop(key)
43 | diff_job += 1
44 | if diff_job >= 0:
45 | break
46 |
47 | # 剩下的修改queue、interval、from_right, 如果有
48 | for index, job_thread in enumerate(tmp_jobs.values()):
49 | conf = config[index]
50 | job_thread.queue_name = conf['queue']
51 | job_thread.sleep_time = conf['interval']
52 | job_thread.from_right = conf.get('from_right', True)
53 |
54 | def init_job_threads(config_dict, force=True):
55 | # 如果首次注册,根据配置启动工作线程,否则根据之前的配置启动。
56 | set_job_threads(config_dict)
57 |
58 | # 将一些信息补全,让监视界面认为这个worker已经启动
59 | alias = CONFIG['server']['alias']
60 |
61 | # set worker config
62 | worker_config = ztq_core.get_worker_config()
63 | if alias not in worker_config or force:
64 | worker_config[alias] = config_dict
65 |
66 | def set_dirve( from_mime, to_mime, conf):
67 | """ 根据驱动配置, 更改驱动参数 """
68 | #set_drive_config(from_mime, to_mime, conf)
69 | pass
70 |
71 | def report(start_time):
72 | """ 转换器向服务器报告状态 """
73 | cpu_style = get_cpu_style()
74 | cpu_percent = get_cpu_usage()
75 | mem_percent, mem_total = get_mem_usage()
76 | ip = CONFIG['server']['alias']
77 |
78 | traceback_dict = {}
79 | for thread_id, frame in sys._current_frames().items():
80 | traceback_dict[thread_id] = traceback.format_stack(frame)
81 |
82 | # 向服务器报告
83 | return dict( ip=ip,
84 | cpu_style=cpu_style,
85 | cpu_percent=cpu_percent,
86 | mem_total=mem_total,
87 | mem_percent=mem_percent,
88 | cron_running=ztq_core.cron.CRON_RUNNING,
89 | started=start_time,
90 | timestamp=int(time.time()),
91 | traceback=traceback_dict,
92 | )
93 |
94 | def kill_transform(pid, timestamp):
95 | """ 中止 转换 """
96 | kill(pid)
97 |
98 | def cancel_transform(pid, timestamp):
99 | """ 取消 转换 """
100 | kill(pid)
101 |
102 | if not os.sys.platform.startswith('win'):
103 | def kill(pid):
104 | """ kill process by pid for linux """
105 | # XXX 无法杀孙子进程
106 | kill_command = "kill -9 `ps --no-heading --ppid %s|awk '{print $1}'` %s" % (pid, pid)
107 | os.system(kill_command)
108 | else:
109 | def kill(pid):
110 | """ kill process by pid for windows """
111 | kill_command = "taskkill /F /T /pid %s" % pid
112 | os.system(kill_command)
113 |
114 | def start_buffer_thread(buffer_thread_config):
115 | """ 开启一个buffer队列线程,监视所有的buffer队列,
116 | 根据buffer队列对应的job队列拥塞情况, 将buffer队列的任务合适的推送到相应的job队列
117 | """
118 | if not buffer_thread_config: return
119 |
120 | global buffer_thread_instance
121 | if buffer_thread_instance is not None:
122 | buffer_thread_instance.stop()
123 |
124 | buffer_thread = BufferThread(buffer_thread_config)
125 | buffer_thread.setDaemon(True)
126 | buffer_thread.start()
127 |
128 | buffer_thread_instance = buffer_thread
129 | sys.stdout.write('start a buffer thread. \n')
130 |
131 | def clear_transform_thread(threads=None):
132 | """ clear job_threads and buffer_thread """
133 | threads = threads or job_thread_manager.threads
134 | names = threads.keys()
135 | job_threads = threads.values()
136 |
137 | # 退出buffer 线程
138 | if buffer_thread_instance is not None:
139 | buffer_thread_instance.stop()
140 | sys.stdout.write('wait the buffer thread stop...\n')
141 |
142 | # 将进程的stop 标志 设置为True
143 | map(job_thread_manager.stop, names)
144 |
145 | # 如果这个线程没有工作,只是在阻塞等待任务,就发送一个空的任务
146 | # 让这个线程立刻结束
147 | for job_thread in job_threads:
148 | if job_thread.start_job_time == 0:
149 | queue_name = job_thread.queue_name
150 | queue = ztq_core.get_task_queue(queue_name)
151 | queue.push('')
152 |
153 | # 等待线程退出
154 | for job_thread in job_threads:
155 | sys.stdout.write('wait the %s stop...\n'%job_thread.getName())
156 | job_thread.join(30)
157 |
158 | import atexit
159 | atexit.register(clear_transform_thread) # 系统退出后清理工作线程
160 |
161 |
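# Shape of the config_dict accepted by set_job_threads / init_job_threads
# (illustrative values; queue names are placeholders). Each queue maps to one
# entry per worker thread, which mirrors what ztq_worker.main builds from the
# [queues] section of worker.ini:
#
#   config_dict = {
#       'default': [{'interval': 0}],                    # one thread
#       'mail':    [{'interval': 0}, {'interval': 5}],   # two threads
#   }
#   init_job_threads(config_dict)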
--------------------------------------------------------------------------------
/ztq_core/README.md:
--------------------------------------------------------------------------------
1 | ZTQ:Z Task Queue
2 | ===========================================
3 | ZTQ是python语言的一个开源异步队列服务, 使用redis作为队列的存储和通讯。
4 |
5 | 和其他队列服务不同,ZTQ的设计目标是:
6 |
7 | - 实现简单
8 | - 容易使用
9 | - 可靠
10 | - 错误、拥塞时,可管理
11 | - 容易调试
12 | - 灵活调度,高效利用服务器
13 |
14 | 详细介绍可参看: https://github.com/everydo/ztq/raw/master/about-ztq.pptx
15 |
16 | ZTQ是由易度云办公(http://everydo.com) 赞助开发的,在易度云查看和易度文档管理等系统中广泛使用。
17 |
18 | 主要作者和维护人:
19 |
20 | - 徐陶哲 http://weibo.com/xutaozhe
21 | - 潘俊勇 http://weibo.com/panjunyong
22 |
23 | 安装
24 | --------------------
25 | 包括4个包:
26 |
27 | 1. ztq_core: 提供队列操作的底层操作API
28 | 2. ztq_worker: 队列的处理服务
29 | 3. ztq_console:队列的监控后台服务(使用Pyramid开发),这个包是可选运行的
30 | 4. ztq_demo: 一个demo示例
31 |
32 | 可直接使用标准的pip进行安装:
33 |
34 | pip install ztq_core
35 | pip install ztq_worker
36 | pip install ztq_console
37 |
38 | 使用
39 | -------------------
40 | 详细的测试例子可见 ztq_demo包
41 |
42 | 1. 先定义一个普通的任务
43 |
44 | import time
45 |
46 | def send(body):
47 | print 'START: ', body
48 | time.sleep(5)
49 | print 'END:', body
50 |
51 | def send2(body):
52 | print 'START2', body
53 | raise Exception('connection error')
54 |
55 | 2. 将普通的任务改成队列任务
56 |
57 | import time
58 | from ztq_core import async
59 |
60 | @async # 使用默认队列default
61 | def send(body):
62 | print 'START: ', body
63 | time.sleep(5)
64 | print 'END:', body
65 |
66 | @async(queue='mail') # 使用队列mail
67 | def send2(body):
68 | print 'START2', body
69 | raise Exception('connection error')
70 |
71 | 3. 运行worker
72 |
73 | 通过这个命令运行worker
74 |
75 | bin/ztq_worker worker.ini
76 |
77 | 下面是 worker.ini 例子:
78 |
79 | [server]
80 | host = localhost
81 | port = 6379
82 | db = 0
83 | alias = w01
84 | active_config = false
85 | modules = ztq_demo.tasks # 所有需要import的任务模块,每个一行
86 |
87 | [queues]
88 | default= 0 # default队列,起1个处理线程
89 | mail = 0, 0 # mail队列,起2个处理线程
90 |
91 | [log]
92 | handler_file = ./ztq_worker.log
93 | level = ERROR
94 |
95 | 4. 运行
96 |
97 | import ztq_core
98 | from ztq_demo.tasks import send
99 |
100 | # 设置 Redis 连接
101 | ztq_core.setup_redis('default', 'localhost', 6379, 0)
102 |
103 | send('hello, world')
104 |
105 | # 动态指定queue
106 | send('hello world from mail', ztq_queue='mail')
107 |
108 | 启动监控后台
109 | --------------------
110 |
111 | bin/pserve app.ini
112 |
113 | 更高级的特性
114 | --------------------------
115 |
116 | 1. 抢占式执行
117 |
118 | 后插入先执行。如果任务已经在队列,会优先
119 |
120 | send (body, ztq_first=True)
121 |
122 | 2. 探测任务状态
123 |
124 | ping_task(send, body, ztq_first=True, ztq_run=True)
125 |
126 | 任务存在如下状态:
127 |
128 | * running: 代表正在运行,
129 | * queue: 代表正在排队
130 | * error: 代表出错
131 | * none: 代表这个任务不在排队,也没在执行
132 |
133 | 参数:
134 |
135 | - ztq_first:存在就优先
136 | - ztq_run:不存在就运行
137 |
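A short example of acting on the returned state (illustrative; `send` is a task
decorated with @async as above):

    state = ping_task(send, body, ztq_first=True, ztq_run=True)
    if state == 'error':
        print 'the last run of this task failed, check the error queue'
    elif state == 'none':
        print 'task was not queued or running, it has just been resubmitted'
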
138 | 3. 支持事务
139 |
140 | import transaction
141 | ztq_core.enable_transaction(True)
142 | send_mail(from1, to1, body1)
143 | send_mail(from2, to2, body2)
144 | transaction.commit()
145 | # 也可以单独关闭事务
146 | send_mail(from2, to2, body2, ztq_transaction=False)
147 |
148 | 4. 定时任务
149 |
150 | from ztq_core.async import async
151 | from ztq_core import redis_wrap
152 | from ztq_core.cron import has_cron, add_cron
153 |
154 | @async(queue='clock-0')
155 | def bgrewriteaof():
156 | """ 将redis的AOF文件压缩 """
157 | redis = redis_wrap.get_redis()
158 | redis.bgrewriteaof()
159 |
160 |
161 | # 如果队列上没有这个定时任务,就加上。自动定时压缩redis
162 | if not has_cron(bgrewriteaof):
163 | add_cron({'hour':1}, bgrewriteaof)
164 |
165 | 5. 任务串行
166 |
167 | from ztq_core import prepare_task
168 | # 根据(方法,参数)生成一个任务
169 | callback = prepare_task(send, body)
170 | # 执行完 send_mail 之后队列会自动将callback 放入指定的队列
171 | send_mail(body, ztq_callback=callback)
172 |
173 | 6. 异常处理
174 |
175 | from ztq_core import prepare_task
176 |
177 | @async(queue='mail')
178 | def fail_callback(return_code, return_msg):
179 | print return_code, return_msg
180 |
181 | fcallback = prepare_task(send2)
182 |
183 | # 如果任务 send 抛出了任何异常,都会将fcallback 放入指定队列
184 | send(body, ztq_fcallback=fcallback)
185 |
186 | 7. 进度回调
187 |
188 | import ztq_worker
189 | @async(queue='doc2pdf')
190 | def doc2pdf(filename):
191 | ...
192 | # 可被进度回调函数调用
193 | ztq_worker.report_progress(page=2)
194 | ...
195 |
196 | from ztq_core import prepare_task
197 | pcallback = prepare_task(send2, body)
198 | doc2pdf(filename, ztq_pcallback=pcallback)
199 |
200 | 8. 批处理
201 |
202 | # 为提升性能,需要多个xapian索引操作,一次性提交数据库
203 | @async(queue='xapian')
204 | def index(data):
205 | pass
206 |
207 | def do_commit():
208 | xapian_conn.commit()
209 |
210 | # 每执行20个索引任务之后,一次性提交数据库
211 | # 不够20个,但队列空的时候,也会提交
212 | register_batch_queue('xapian', 20, batch_func=do_commit)
213 |
214 |
--------------------------------------------------------------------------------
/ztq_worker/README.md:
--------------------------------------------------------------------------------
1 | ZTQ:Z Task Queue
2 | ===========================================
3 | ZTQ是python语言的一个开源异步队列服务, 使用redis作为队列的存储和通讯。
4 |
5 | 和其他队列服务不同,ZTQ的设计目标是:
6 |
7 | - 实现简单
8 | - 容易使用
9 | - 可靠
10 | - 错误、拥塞时,可管理
11 | - 容易调试
12 | - 灵活调度,高效利用服务器
13 |
14 | 详细介绍可参看: https://github.com/everydo/ztq/raw/master/about-ztq.pptx
15 |
16 | ZTQ是由易度云办公(http://everydo.com) 赞助开发的,在易度云查看和易度文档管理等系统中广泛使用。
17 |
18 | 主要作者和维护人:
19 |
20 | - 徐陶哲 http://weibo.com/xutaozhe
21 | - 潘俊勇 http://weibo.com/panjunyong
22 |
23 | 安装
24 | --------------------
25 | 包括4个包:
26 |
27 | 1. ztq_core: 提供队列操作的底层操作API
28 | 2. ztq_worker: 队列的处理服务
29 | 3. ztq_console:队列的监控后台服务(使用Pyramid开发),这个包是可选运行的
30 | 4. ztq_demo: 一个demo示例
31 |
32 | 可直接使用标准的pip进行安装:
33 |
34 | pip install ztq_core
35 | pip install ztq_worker
36 | pip install ztq_console
37 |
38 | 使用
39 | -------------------
40 | 详细的测试例子可见 ztq_demo包
41 |
42 | 1. 先定义一个普通的任务
43 |
44 | import time
45 |
46 | def send(body):
47 | print 'START: ', body
48 | time.sleep(5)
49 | print 'END:', body
50 |
51 | def send2(body):
52 | print 'START2', body
53 | raise Exception('connection error')
54 |
55 | 2. 将普通的任务改成队列任务
56 |
57 | import time
58 | from ztq_core import async
59 |
60 | @async # 使用默认队列default
61 | def send(body):
62 | print 'START: ', body
63 | time.sleep(5)
64 | print 'END:', body
65 |
66 | @async(queue='mail') # 使用队列mail
67 | def send2(body):
68 | print 'START2', body
69 | raise Exception('connection error')
70 |
71 | 3. 运行worker
72 |
73 | 通过这个命令运行worker
74 |
75 | bin/ztq_worker worker.ini
76 |
77 | 下面是 worker.ini 例子:
78 |
79 | [server]
80 | host = localhost
81 | port = 6379
82 | db = 0
83 | alias = w01
84 | active_config = false
85 | modules = ztq_demo.tasks # 所有需要import的任务模块,每个一行
86 |
87 | [queues]
88 | default= 0 # default队列,起1个处理线程
89 | mail = 0, 0 # mail队列,起2个处理线程
90 |
91 | [log]
92 | handler_file = ./ztq_worker.log
93 | level = ERROR
94 |
95 | 4. 运行
96 |
97 | import ztq_core
98 | from ztq_demo.tasks import send
99 |
100 | # 设置 Redis 连接
101 | ztq_core.setup_redis('default', 'localhost', 6379, 0)
102 |
103 | send('hello, world')
104 |
105 | # 动态指定queue
106 | send('hello world from mail', ztq_queue='mail')
107 |
108 | 启动监控后台
109 | --------------------
110 |
111 | bin/pserve app.ini
112 |
113 | 更高级的特性
114 | --------------------------
115 |
116 | 1. 抢占式执行
117 |
118 | 后插入先执行。如果任务已经在队列,会优先
119 |
120 | send (body, ztq_first=True)
121 |
122 | 2. 探测任务状态
123 |
124 | ping_task(send, body, ztq_first=True, ztq_run=True)
125 |
126 | 任务存在如下状态:
127 |
128 | * running: 代表正在运行,
129 | * queue: 代表正在排队
130 | * error: 代表出错
131 | * none: 代表这个任务不在排队,也没在执行
132 |
133 | 参数:
134 |
135 | - ztq_first:存在就优先
136 | - ztq_run:不存在就运行
137 |
138 | 3. 支持事务
139 |
140 | import transaction
141 | ztq_core.enable_transaction(True)
142 | send_mail(from1, to1, body1)
143 | send_mail(from2, to2, body2)
144 | transaction.commit()
145 | # 也可以单独关闭事务
146 | send_mail(from2, to2, body2, ztq_transaction=False)
147 |
148 | 4. 定时任务
149 |
150 | from ztq_core.async import async
151 | from ztq_core import redis_wrap
152 | from ztq_core.cron import has_cron, add_cron
153 |
154 | @async(queue='clock-0')
155 | def bgrewriteaof():
156 | """ 将redis的AOF文件压缩 """
157 | redis = redis_wrap.get_redis()
158 | redis.bgrewriteaof()
159 |
160 |
161 | # 如果队列上没有这个定时任务,就加上。自动定时压缩redis
162 | if not has_cron(bgrewriteaof):
163 | add_cron({'hour':1}, bgrewriteaof)
164 |
165 | 5. 任务串行
166 |
167 | from ztq_core import prepare_task
168 | # 根据(方法,参数)生成一个任务
169 | callback = prepare_task(send, body)
170 | # 执行完 send_mail 之后队列会自动将callback 放入指定的队列
171 | send_mail(body, ztq_callback=callback)
172 |
173 | 6. 异常处理
174 |
175 | from ztq_core import prepare_task
176 |
177 | @async(queue='mail')
178 | def fail_callback(return_code, return_msg):
179 | print return_code, return_msg
180 |
181 | fcallback = prepare_task(send2)
182 |
183 | # 如果任务 send 抛出了任何异常,都会将fcallback 放入指定队列
184 | send(body, ztq_fcallback=fcallback)
185 |
186 | 7. 进度回调
187 |
188 | import ztq_worker
189 | @async(queue='doc2pdf')
190 | def doc2pdf(filename):
191 | ...
192 | # 可被进度回调函数调用
193 | ztq_worker.report_progress(page=2)
194 | ...
195 |
196 | from ztq_core import prepare_task
197 | pcallback = prepare_task(send2, body)
198 | doc2pdf(filename, ztq_pcallback=pcallback)
199 |
200 | 8. 批处理
201 |
202 | # 为提升性能,需要多个xapian索引操作,一次性提交数据库
203 | @async(queue='xapian')
204 | def index(data):
205 | pass
206 |
207 | def do_commit():
208 | xapian_conn.commit()
209 |
210 | # 每执行20个索引任务之后,一次性提交数据库
211 | # 不够20个,但队列空的时候,也会提交
212 | register_batch_queue('xapian', 20, batch_func=do_commit)
213 |
214 |
--------------------------------------------------------------------------------
/ztq_console/README.md:
--------------------------------------------------------------------------------
1 | ZTQ:Z Task Queue
2 | ===========================================
3 | ZTQ是python语言的一个开源异步队列服务, 使用redis作为队列的存储和通讯。
4 |
5 | 和其他队列服务不同,ZTQ的设计目标是:
6 |
7 | - 实现简单
8 | - 容易使用
9 | - 可靠
10 | - 错误、拥塞时,可管理
11 | - 容易调试
12 | - 灵活调度,高效利用服务器
13 |
14 | 详细介绍可参看: https://github.com/everydo/ztq/raw/master/about-ztq.pptx
15 |
16 | ZTQ是由易度云办公(http://everydo.com) 赞助开发的,在易度云查看和易度文档管理等系统中广泛使用。
17 |
18 | 主要作者和维护人:
19 |
20 | - 徐陶哲 http://weibo.com/xutaozhe
21 | - 潘俊勇 http://weibo.com/panjunyong
22 |
23 | 安装
24 | --------------------
25 | 包括4个包:
26 |
27 | 1. ztq_core: 提供队列操作的底层操作API
28 | 2. ztq_worker: 队列的处理服务
29 | 3. ztq_console:队列的监控后台服务(使用Pyramid开发),这个包是可选运行的
30 | 4. ztq_demo: 一个demo示例
31 |
32 | 可直接使用标准的pip进行安装:
33 |
34 | pip install ztq_core
35 | pip install ztq_worker
36 | pip install ztq_console
37 |
38 | 使用
39 | -------------------
40 | 详细的测试例子可见 ztq_demo包
41 |
42 | 1. 先定义一个普通的任务
43 |
44 | import time
45 |
46 | def send(body):
47 | print 'START: ', body
48 | time.sleep(5)
49 | print 'END:', body
50 |
51 | def send2(body):
52 | print 'START2', body
53 | raise Exception('connection error')
54 |
55 | 2. 将普通的任务改成队列任务
56 |
57 | import time
58 | from ztq_core import async
59 |
60 | @async # 使用默认队列default
61 | def send(body):
62 | print 'START: ', body
63 | time.sleep(5)
64 | print 'END:', body
65 |
66 | @async(queue='mail') # 使用队列mail
67 | def send2(body):
68 | print 'START2', body
69 | raise Exception('connection error')
70 |
71 | 3. 运行worker
72 |
73 | 通过这个命令运行worker
74 |
75 | bin/ztq_worker worker.ini
76 |
77 | 下面是 worker.ini 例子:
78 |
79 | [server]
80 | host = localhost
81 | port = 6379
82 | db = 0
83 | alias = w01
84 | active_config = false
85 | modules = ztq_demo.tasks # 所有需要import的任务模块,每个一行
86 |
87 | [queues]
88 | default = 0 # default队列,起1个处理线程(数值为该线程处理完一个任务后的休眠秒数)
89 | mail = 0, 0 # mail队列,起2个处理线程(逗号分隔几个数值就起几个线程)
90 |
91 | [log]
92 | handler_file = ./ztq_worker.log
93 | level = ERROR
94 |
95 | 4. 运行
96 |
97 | import ztq_core
98 | from ztq_demo.tasks import send
99 |
100 | # 设置 Redis 连接
101 | ztq_core.setup_redis('default', 'localhost', 6379, 0)
102 |
103 | send('hello, world')
104 |
105 | # 动态指定queue
106 | send('hello world from mail', ztq_queue='mail')
107 |
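任务提交之后,也可以在客户端直接查看队列的积压情况(get_task_queue 等接口的定义见 ztq_core/model.py):

    # 查看 mail 队列当前积压的任务数
    queue = ztq_core.get_task_queue('mail')
    print 'mail queue length:', len(queue)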
108 | 启动监控后台
109 | --------------------
110 |
111 | bin/pserve app.ini
112 |
113 | 更高级的特性
114 | --------------------------
115 |
116 | 1. 抢占式执行
117 |
118 | 后插入的任务先执行;如果任务已经在队列中,也会被优先处理
119 |
120 | send(body, ztq_first=True)
121 |
122 | 2. 探测任务状态
123 |
124 | ping_task(send, body, ztq_first=True, ztq_run=True)
125 |
126 | 任务存在如下状态:
127 |
128 | * running: 代表正在运行,
129 | * queue: 代表正在排队
130 | * error: 代表出错
131 | * none: 代表这个任务不在排队,也没在执行
132 |
133 | 参数:
134 |
135 | - ztq_first:存在就优先
136 | - ztq_run:不存在就运行
137 |
138 | 3. 支持事务
139 |
140 | import transaction
141 | ztq_core.enable_transaction(True)
142 | send_mail(from1, to1, body1)
143 | send_mail(from2, to2, body2)
144 | transaction.commit()
145 | # 也可以单独关闭事务
146 | send_mail(from2, to2, body2, ztq_transaction=False)
147 |
148 | 4. 定时任务
149 |
150 | from ztq_core.async import async
151 | from ztq_core import redis_wrap
152 | from ztq_core.cron import has_cron, add_cron_job
153 |
154 | @async(queue='clock-0')
155 | def bgrewriteaof():
156 | """ 将redis的AOF文件压缩 """
157 | redis = redis_wrap.get_redis()
158 | redis.bgrewriteaof()
159 |
160 |
161 | # 如果队列上没有这个定时任务,就加上。自动定时压缩 redis 的 AOF 文件
162 | if not has_cron(bgrewriteaof):
163 | add_cron_job({'hour':1}, bgrewriteaof)
164 |
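定时信息是一个字典,监控后台的定时任务清单会按其中的 hour、minute 字段显示执行时间。下面是一个每天 1 时 30 分执行的示意(假设 cron 字典支持 minute 字段):

    if not has_cron(bgrewriteaof):
        add_cron_job({'hour': 1, 'minute': 30}, bgrewriteaof)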
165 | 5. 任务串行
166 |
167 | from ztq_core import prepare_task
168 | # 根据(方法,参数)生成一个任务
169 | callback = prepare_task(send, body)
170 | # 执行完 send_mail 之后队列会自动将callback 放入指定的队列
171 | send_mail(body, ztq_callback=callback)
172 |
173 | 6. 异常处理
174 |
175 | from ztq_core import prepare_task
176 |
177 | @async(queue='mail')
178 | def fail_callback(return_code, return_msg):
179 | print return_code, return_msg
180 |
181 | fcallback = prepare_task(send2)
182 |
183 | # 如果任务 send 抛出了任何异常,都会将fcallback 放入指定队列
184 | send(body, ztq_fcallback=fcallback)
185 |
186 | 7. 进度回调
187 |
188 | import ztq_worker
189 | @async(queue='doc2pdf')
190 | def doc2pdf(filename):
191 | ...
192 | # 可被进度回调函数调用
193 | ztq_worker.report_progress(page=2)
194 | ...
195 |
196 | from ztq_core import prepare_task
197 | pcallback = prepare_task(send2, body)
198 | doc2pdf(filename, ztq_pcallback=pcallback)
199 |
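进度回调函数需要是已注册的任务,worker 端调用 report_progress 时,传入的关键字参数会和 pcallback_kw 合并后一起传给它(见 ztq_worker/job_thread.py 的 report_progress)。下面是一个更直观的示意(函数名 report_page 仅为举例):

    from ztq_core import async, prepare_task

    @async(queue='doc2pdf')
    def report_page(page=0):
        print 'converted pages:', page

    pcallback = prepare_task(report_page)
    doc2pdf(filename, ztq_pcallback=pcallback)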
200 | 8. 批处理
201 |
202 | # 为提升性能,需要多个xapian索引操作,一次性提交数据库
203 | @async(queue='xapian')
204 | def index(data):
205 | pass
206 |
207 | def do_commit():
208 | xapian_conn.commit()
209 |
210 | # 每执行20个索引任务之后,一次性提交数据库
211 | # 不够20个,但队列空的时候,也会提交
212 | register_batch_queue('xapian', 20, batch_func=do_commit)
213 |
214 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/templates/worker.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | 下面是各个工作端的当前工作情况:
10 |
11 |
12 |
13 |
14 | 主机名称
15 | IP
16 |
17 | CPU 占用
18 | 内存占用
19 | 启动时间
20 | 上报时间
21 |
22 | 状态
23 | 定时器
24 | 操作
25 |
26 |
27 | 当前工作
28 |
29 |
30 |
31 | {% if workers %}
32 | {% for worker in workers %}
33 |
34 | {{ worker['_worker_name'] }}
35 | {{ worker['ip'] }}
36 |
37 | {{ worker['cpu_percent'] }}
38 | 共 {{ worker['mem_total'] }} 已用 {{ worker['mem_percent'] }}
39 | {{ worker['_started'] }}
40 |
41 | {{ worker['_timestamp'] }}
42 |
43 | {% if worker['_active'] == 'shutdown' %}
44 | 下线
45 | {% elif worker['_active'] == 'idle' %}
46 | 空闲
47 | {% elif worker['_active'] == 'work' %}
48 | 运转
49 | {% endif %}
50 |
51 |
52 | {% if worker['cron_running'] %}
53 | 运行
54 | {% else %}
55 | 停止
56 | {% endif %}
57 |
58 | {% if worker['_active'] == 'work' %}
59 |
60 | 刷新
61 | 停止
62 |
63 | {% else %}
64 |
65 | 启用
66 | 删除
67 |
68 | {% endif %}
69 |
70 |
71 |
72 | {% for thread in worker['_threads'] %}
73 |
85 | {% endfor %}
86 |
87 |
88 | {% endfor %}
89 | {% endif %}
90 |
91 |
92 |
93 |
94 |
95 | 定时任务清单:
96 |
97 |
98 |
99 |
100 | 执行时间
101 | 任务
102 | 队列
103 | 参数
104 | 附加参数
105 |
106 |
107 |
108 | {% for cron in crons %}
109 |
110 | {{cron['cron_info'].get('hour', 0)}} 时 {{cron['cron_info'].get('minute', 0)}} 分
111 | {{cron['func_name']}}
112 | {{cron['queue']}}
113 | {{ ', '.join(cron['args'])}}
114 | {{ ', '.join(cron['kw'])}}
115 |
116 | {% endfor %}
117 |
118 |
119 |
120 |
121 |
122 |
--------------------------------------------------------------------------------
/ztq_core/ztq_core/model.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 | from redis_wrap import get_set, get_key, set_key, \
3 | get_queue, get_dict, get_keys, get_limit_queue, get_hash
4 |
5 | def get_all_task_queue():
6 | """返回所有原子队列的key
7 | 返回类型:list
8 | """
9 | task_queue = "ztq:queue:task:"
10 | return get_keys(task_queue)
11 |
12 | def get_all_error_queue():
13 |     """返回所有错误队列的key
14 | 返回类型:list
15 | """
16 | task_queue = "ztq:queue:error:"
17 | return get_keys(task_queue)
18 |
19 | def get_task_hash(queue_name, system='default'):
20 | """ 得到 一个 task_md5 -> task 的字典对象 """
21 | return get_hash('ztq:hash:task:' + queue_name, system=system)
22 |
23 | def get_task_set(queue_name, serialized_type='json'):
24 |     """ 得到一个 task 的集合(set)对象 """
25 | return get_set('ztq:set:task:' + queue_name, serialized_type=serialized_type)
26 |
27 | def get_task_queue(queue_name, system='default'):
28 | """根据传入参数queue_name
29 |
30 | {"func":'transform',
31 | 'args':(),
32 | 'kw':{'path':'c:\\abc.doc', #源文件路径
33 | 'mime':'application/ms-word', #源文件类型
34 | 'transform':[
35 | {'path':'d:\\abc.html',
36 | 'mime':'text/html',
37 | 'callback':'http://xxx.com/asss'
38 |
39 | 'transform':[
40 | {'path':'d:\\abc.txt',
41 | 'mime':'text/plain',
42 | 'transform':[]
43 | }, # 转换子文件放在同一文件夹中
44 | ]}]},
45 |
46 | 'callback':callback, # 全部完成调用方法
47 | 'callback_args':callback_args, # 全部完成的调用参数
48 | 'callback_kw':callback_kw,
49 |
50 | 'pcallback':callback, # progress callback 部分完成的调用
51 | 'pcallback_args':callback_args, # 部分完成的调用参数
52 | 'pcallback_kw':callback_kw, # 部分完成调用参数
53 |
54 | 'fcallback':callback, # 失败调用方法
55 | 'fcallback_args':callback_args, # 失败的调用参数
56 | 'fcallback_kw':callback_kw,
57 |
58 | "runtime":{ # 队列运行相关信息
59 | 'created':12323423 # 进入队列时间
60 | }
61 | }
62 | """
63 | #ListFu
64 | atom_queue = "ztq:queue:task:" + queue_name
65 | return get_queue(atom_queue, system=system, serialized_type='string')
66 |
67 | def get_command_queue(name):
68 | """ 同步配置、状态报告、杀死转换线程
69 |
70 | 要求同步worker配置
71 | {
72 | 'command':'updateworker',
73 | 'timestamp':''
74 | }
75 | 要求同步转换器线程驱动
76 | {
77 | 'command':'updatedriver',
78 | 'timestamp':''
79 | }
80 |
81 | 要求worker报告整体工作状态::
82 |
83 | {
84 | 'command':'report',
85 | 'timestamp':
86 | }
87 | 后台杀一个转换进程(可能卡死)::
88 | {
89 | 'command':'kill',
90 | 'timestamp':
91 | 'pid':'2121',
92 | }
93 |
94 | 用户取消一个转换进程,和杀死类似,不应该进入错误队列,日志也需要说明是取消::
95 | {
96 | 'command':'cancel',
97 | 'timestamp':
98 | 'pid':'2121',
99 | }
100 | """
101 | command_queue = 'ztq:queue:command:'+name
102 | return get_queue(command_queue)
103 |
104 | def get_work_log_queue():
105 | """ json格式为::
106 |
107 | {'func':'transform',
108 | 'kw':{ ... # 和前面task_queue相同
109 | },
110 | "runtime":{ # 队列运行相关信息
111 | 'created':12323423 #进入原始队列时间
112 |         'queue':'q01' # 是在哪个原子队列
113 | 'start':123213123 #转换开始时间
114 | 'end':123213123 #转换结束时间
115 | 'worker':'w01', # 转换器名
116 | 'thread':'131231', #
117 | 'return':-1, # 返回的错误代号, 0表示成功
118 | 'reason':'失败原因' # 详细的原因
119 | }
120 | }
121 | """
122 | work__log_queue = "ztq:queue:worker_log"
123 | return get_limit_queue(work__log_queue, 200)
124 |
125 | def get_error_hash(queue_name, system='default'):
126 | """ json格式和work_log相同 """
127 | error_queue = 'ztq:hash:error:' + queue_name
128 | return get_hash(error_queue, system=system)
129 |
130 | def get_error_queue(queue_name, system='default'):
131 | """ json格式和work_log相同 """
132 | error_queue = 'ztq:queue:error:' + queue_name
133 | return get_queue(error_queue, system=system, serialized_type='string')
134 |
135 | def get_buffer_queue(queue_name, system='default'):
136 | """ json格式和work_log相同 """
137 | buffer_queue = 'ztq:queue:buffer:' + queue_name
138 | return get_queue(buffer_queue, system=system)
139 |
140 | def get_system_log_queue():
141 | """
142 | Json格式为:
143 | {'alias':'w01'
144 | 'host':'192.168.1.100'
145 | 'timestamp':123213123
146 | 'type': 'reboot' or 'shutdown' or 'power' 三个值中其中一个
147 | }
148 | """
149 | system__log_queue ='ztq:queue:system_log'
150 | return get_limit_queue(system__log_queue, 200)
151 |
152 | def get_callback_queue():
153 | callback_queue='ztq:queue:callback'
154 | return get_queue(callback_queue)
155 |
156 | # state -------------------------------------------------------------------
157 | def get_all_worker():
158 | """返回正在运行中的转换器列表
159 | 返回类似:list
160 | """
161 | prefix = 'ztq:state:worker:'
162 | return get_keys(prefix)
163 |
164 | def get_worker_state():
165 |     """ transformer在如下2种状况下,会由指令线程上报转换器的状态::
166 |
167 | - 启动的时候
168 | - 有指令要求
169 |
170 | 在redis中的存放格式为::
171 |
172 | {'ip':'192.168.1.1',
173 | 'cpu_style':'Dural Xommm 1G',
174 | 'cpu_percent':'30%',
175 | 'mem_total':'2G',
176 | 'mem_percent':'60%',
177 | 'started':1231231231,
178 | 'timestamp':12312312,
179 | 'tracebacks':'全部线程的traceback信息,用于死锁检查',
180 | }
181 | 转换器状态信息,主要用于监控转换器是否良性工作,会在监控界面中显示。
182 | """
183 | prefix = 'ztq:state:worker:'
184 | return get_dict(prefix)
185 |
186 | def get_job_state(worker_job_name):
187 | """ 转换器w01,第0号转换线程的当前转换任务信息
188 |
189 | - 每次开始转换,需要记录转换的信息
190 | - 每次结束的时候,需要清空
191 |
192 | json格式为::
193 |
194 | {'func':'transform',
195 | 'kw':{ ... # 和上面task_queue相同
196 | },
197 | 'runtime':{... # 和上面work_log相同
198 | }
199 | 'process':{
200 | 'pid': 212, # -1 表示不能杀
201 | 'start':131231,
202 | 'comment':'d:\ssd.pdf'
203 | }
204 | }
205 | """
206 | prefix = 'ztq:state:job:%s:' % worker_job_name
207 | return get_dict(prefix)
208 |
209 | def get_queue_config():
210 | """记录queue的基本信息
211 | {'title':'sys_cron-0', #可选
212 | 'tags':(['sys_cron']), #可选
213 | } #可选
214 | """
215 | prefix = 'ztq:config:queue:'
216 | return get_dict(prefix)
217 |
218 | def get_worker_config():
219 | """ 配置工作线程:处理哪些队列,几个线程,间隔时间::
220 | {'q01':[{ 'interval':5, # 间隔时间
221 | 'from_right':True, }], # 队列处理的方向,左(l)或者右(r)
222 | 'q02':[{'interval':5, 'from_right':False},
223 | {'interval':3, 'from_right':True}],
224 | }
225 | """
226 | prefix = 'ztq:config:worker:'
227 | return get_dict(prefix)
228 |
229 | def get_driver_config():
230 | """
231 | TODO:消息格式
232 | """
233 | prefix = 'ztq:config:driver:'
234 | return get_dict(prefix)
235 |
236 | def get_cron_set():
237 | """ 定时任务list
238 | TODO
239 | """
240 | return get_set('ztq:set:cron')
241 |
242 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ZTQ:Z Task Queue
2 | ===========================================
3 | ZTQ是python语言的一个开源异步队列服务, 使用redis作为队列的存储和通讯。
4 |
5 | 和其他队列服务不同,ZTQ的设计目标是:
6 |
7 | - 实现简单
8 | - 容易使用
9 | - 可靠
10 | - 错误、拥塞时,可管理
11 | - 容易调试
12 | - 灵活调度,高效利用服务器
13 |
14 | 详细介绍可参看: https://github.com/everydo/ztq/raw/master/about-ztq.pptx
15 |
16 | ZTQ是由易度云办公(http://easydo.cn) 赞助开发的,在易度云查看和易度文档管理等系统中广泛使用。
17 |
18 | 主要作者和维护人:
19 |
20 | - 潘俊勇 http://weibo.com/panjunyong
21 |
22 | 安装
23 | --------------------
24 | 包括4个包:
25 |
26 | 1. ztq_core: 提供队列操作的底层操作API
27 | 2. ztq_worker: 队列的处理服务
28 | 3. ztq_console:队列的监控后台服务(使用Pyramid开发),这个包是可选运行的
29 | 4. ztq_demo: 一个demo示例
30 |
31 | 可直接使用标准的pip进行安装:
32 |
33 | pip install ztq_core
34 | pip install ztq_worker
35 | pip install ztq_console
36 |
37 | 使用
38 | -------------------
39 | 详细的测试例子可见 ztq_demo包
40 |
41 | 1. 先定义一个普通的任务
42 |
43 | import time
44 |
45 | def send(body):
46 | print 'START: ', body
47 | time.sleep(5)
48 | print 'END:', body
49 |
50 | def send2(body):
51 | print 'START2', body
52 | raise Exception('connection error')
53 |
54 | 2. 将普通的任务改成队列任务
55 |
56 | import time
57 | from ztq_core import async
58 |
59 | @async # 使用默认队列default
60 | def send(body):
61 | print 'START: ', body
62 | time.sleep(5)
63 | print 'END:', body
64 |
65 | @async(queue='mail') # 使用队列mail
66 | def send2(body):
67 | print 'START2', body
68 | raise Exception('connection error')
69 |
70 | 3. 运行worker
71 |
72 | 1.使用virtualenv 建立虚拟环境
73 |
74 | virtualenv ztq_env(并激活虚拟环境)
75 |
76 | 2.安装
77 |
78 | pip install ztq_core
79 | pip install ztq_worker
80 | pip install ztq_console
81 |
82 | 3.在 ztq_worker 目录运行
83 |
84 | python setup.py install
85 |
86 | 4.通过这个命令运行worker
87 |
88 | bin/ztq_worker worker.ini
89 |
90 | 下面是 worker.ini 例子:
91 |
92 | [server]
93 | host = localhost
94 | port = 6379
95 | db = 0
96 | alias = w01
97 | active_config = false
98 | modules = ztq_demo.tasks # 所有需要import的任务模块,每个一行
99 |
100 | [queues]
101 | default = 0 # default队列,起1个处理线程(数值为该线程处理完一个任务后的休眠秒数)
102 | mail = 0, 0 # mail队列,起2个处理线程(逗号分隔几个数值就起几个线程)
103 |
104 | [log]
105 | handler_file = ./ztq_worker.log
106 | level = ERROR
107 |
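[queues] 里,逗号分隔几个数值就为该队列起几个线程;每个数值是对应线程处理完一个任务后的休眠秒数(对应 worker 配置里的 interval,见 ztq_core/model.py 的 get_worker_config)。下面是一个示意配置(队列名 index 仅为举例):

    [queues]
    # index 队列起3个线程:第一个不休眠,另外两个分别休眠5秒和10秒
    index = 0, 5, 10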
108 | 4. 运行
109 |
110 | import ztq_core
111 | from ztq_demo.tasks import send
112 |
113 | # 设置 Redis 连接
114 | ztq_core.setup_redis('default', 'localhost', 6379, 0)
115 |
116 | send('hello, world')
117 |
118 | # 动态指定queue
119 | send('hello world from mail', ztq_queue='mail')
120 |
121 | 启动监控后台
122 | --------------------
123 |
124 | 在 ztq_console 目录下(已激活虚拟环境)
125 | 1.运行 python bootstrap.py
126 |
127 | 2.运行 bin/buildout
128 |
129 | 3.运行 bin/pserve app.ini
130 |
131 | 如果运行 pserve 时出现如下错误提示,一般是缺少 PasteScript 依赖,可尝试 pip install PasteScript 解决:
132 | import paste.script.command
133 | ImportError: No module named script.command
134 |
135 | 更高级的特性
136 | --------------------------
137 |
138 | 1. 抢占式执行
139 |
140 | 后插入的任务先执行;如果任务已经在队列中,也会被优先处理
141 |
142 | send(body, ztq_first=True)
143 |
144 | 2. 探测任务状态
145 |
146 | ping_task(send, body, ztq_first=True, ztq_run=True)
147 |
148 | 任务存在如下状态:
149 |
150 | * running: 代表正在运行,
151 | * queue: 代表正在排队
152 | * error: 代表出错
153 | * none: 代表这个任务不在排队,也没在执行
154 |
155 | 参数:
156 |
157 | - ztq_first:存在就优先
158 | - ztq_run:不存在就运行
159 |
160 | 3. 支持事务
161 |
162 | import transaction
163 | ztq_core.enable_transaction(True)
164 | send_mail(from1, to1, body1)
165 | send_mail(from2, to2, body2)
166 | transaction.commit()
167 | # 也可以单独关闭事务
168 | send_mail(from2, to2, body2, ztq_transaction=False)
169 |
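开启事务后,任务一般要等到 commit 才真正进入队列;如果中途出错,可以放弃这一批任务。下面是一个示意(假设事务集成遵循标准 transaction 包的语义,邮件地址仅为举例):

    import transaction
    import ztq_core
    from ztq_core import async

    @async(queue='mail')
    def send_mail(sender, to, body):
        print 'send', sender, to, body

    ztq_core.enable_transaction(True)
    try:
        send_mail('a@example.com', 'b@example.com', 'hello')
        send_mail('a@example.com', 'c@example.com', 'hi')
        transaction.commit()      # 此时任务才真正进入队列
    except Exception:
        transaction.abort()       # 放弃这一批尚未提交的任务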
170 | 4. 定时任务
171 |
172 | from ztq_core.async import async
173 | from ztq_core import redis_wrap
174 | from ztq_core.cron import has_cron, add_cron_job
175 |
176 | @async(queue='clock-0')
177 | def bgrewriteaof():
178 | """ 将redis的AOF文件压缩 """
179 | redis = redis_wrap.get_redis()
180 | redis.bgrewriteaof()
181 |
182 | # 如果队列上没有这个定时任务,就加上。自动定时压缩 redis 的 AOF 文件
183 | if not has_cron(bgrewriteaof):
184 | add_cron_job({'hour':1}, bgrewriteaof)
185 |
186 | # 如果只需执行一次
187 | add_cron_job({'timestamp':123123123}, bgrewriteaof)
188 |
189 | 5. 延时执行
190 |
191 | # 40秒之后执行 sendmail
192 | sendmail(from_addr, to, body, ztq_delay=40)
193 |
194 | 6. 自动重试
195 |
196 | # 定义任务,需要绑定到运行环境,重试3次
197 | @async(bind=True, max_retries=3)
198 | def sendmail(self, from_addr, to, body):
199 |     try:
200 |         time.sleep(30)    # 这里代表实际可能失败的耗时操作
201 |     except:
202 |         # 10秒之后再试
203 |         raise ztq_core.Retry(countdown=10)
204 |
205 | # 重试
206 | sendmail(from_addr, to, body)
207 |
208 | 7. 任务串行
209 |
210 | from ztq_core import prepare_task
211 | # 根据(方法,参数)生成一个任务
212 | callback = prepare_task(send, body)
213 | # 执行完 send_mail 之后队列会自动将callback 放入指定的队列
214 | send_mail(body, ztq_callback=callback)
215 |
216 | 8. 异常处理
217 |
218 | from ztq_core import prepare_task
219 |
220 | @async(queue='mail')
221 | def fail_callback(return_code, return_msg):
222 | print return_code, return_msg
223 |
224 | fcallback = prepare_task(send2)
225 |
226 | # 如果任务 send 抛出了任何异常,都会将fcallback 放入指定队列
227 | send(body, ztq_fcallback=fcallback)
228 |
229 | 9. 进度回调
230 |
231 | import ztq_worker
232 | @async(queue='doc2pdf')
233 | def doc2pdf(filename):
234 | ...
235 | # 可被进度回调函数调用
236 | ztq_worker.report_progress(page=2)
237 | ...
238 |
239 | from ztq_core import prepare_task
240 | pcallback = prepare_task(send2, body)
241 | doc2pdf(filename, ztq_pcallback=pcallback)
242 |
243 | 10. 批处理
244 |
245 | # 为提升性能,需要多个xapian索引操作,一次性提交数据库
246 | @async(queue='xapian')
247 | def index(data):
248 | pass
249 |
250 | def do_commit():
251 | xapian_conn.commit()
252 |
253 | # 每执行20个索引任务之后,一次性提交数据库
254 | # 不够20个,但队列空的时候,也会提交
255 | register_batch_queue('xapian', 20, batch_func=do_commit)
256 |
257 |
258 | 11. 插入到另外的redis数据库
259 |
260 | from ztq_core.redis_wrap import setup_redis
261 | setup_redis('proxy', HOST, PORT, db=0)
262 |
263 | from ztq_core.task import push_task
264 | push_task('doc2pdf:transform', ztq_system='proxy')
265 |
266 |
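setup_redis 还接受一个 recover_func 参数:当连接 redis 失败时,会先调用它一次(例如切换到备用服务器),然后自动重试这次操作(见 ztq_core/redis_wrap.py 的 ha_redis)。下面是一个示意(备用服务器地址仅为举例):

    from ztq_core.redis_wrap import setup_redis

    def recover(system_name):
        # 主库连不上时,切换到备用 redis,并继续保留恢复函数
        setup_redis(system_name, 'redis-backup.example.com', 6379, db=0,
                    recover_func=recover)

    setup_redis('default', 'redis-master.example.com', 6379, db=0,
                recover_func=recover)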
--------------------------------------------------------------------------------
/ztq_core/ztq_core/task.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 | """ redis任务队列
3 | """
4 |
5 | import model
6 | import time
7 | from threading import Thread
8 | from hashlib import md5
9 | from redis_wrap import dump_method
10 |
11 | task_registry = {}
12 |
13 | def register(func, func_name = None):
14 | """ 注册task
15 |
16 | 定义::
17 |
18 | def echo(aaa, bb, c=1):
19 | print aaa, bb, c
20 |
21 | 注册远端任务::
22 |
23 |         from ztq_core.task import register
24 |         register(echo)
25 | """
26 | task_registry[func_name or func.__name__] = func
27 |
28 | def split_full_func_name(full_func_name):
29 | splitted_func_name = full_func_name.rsplit(':', 1)
30 | # 如果没有,就到默认队列
31 | if len(splitted_func_name) == 1:
32 | return 'default', full_func_name
33 | else:
34 | return splitted_func_name
35 |
36 | def gen_task(func_name, *args, **kw):
37 | callback = kw.pop('ztq_callback', '')
38 | callback_args = kw.pop('ztq_callback_args', ())
39 | callback_kw = kw.pop('ztq_callback_kw', {})
40 |
41 | fcallback = kw.pop('ztq_fcallback', '')
42 | fcallback_args = kw.pop('ztq_fcallback_args', ())
43 | fcallback_kw = kw.pop('ztq_fcallback_kw', {})
44 |
45 | pcallback = kw.pop('ztq_pcallback', '')
46 | pcallback_args = kw.pop('ztq_pcallback_args', ())
47 | pcallback_kw = kw.pop('ztq_pcallback_kw', {})
48 | return {'func':func_name,
49 | 'args':args,
50 | 'kw':kw,
51 |
52 | 'callback':callback,
53 | 'callback_args':callback_args,
54 | 'callback_kw':callback_kw,
55 |
56 | 'fcallback':fcallback,
57 | 'fcallback_args':fcallback_args,
58 | 'fcallback_kw':fcallback_kw,
59 |
60 | 'pcallback':pcallback,
61 | 'pcallback_args':pcallback_args,
62 | 'pcallback_kw':pcallback_kw,
63 | }
64 |
65 | def _get_task_md5(task):
66 | """ 得到task(dict) 的md5值 """
67 | #_value = json.dumps(task, sort_keys=True)
68 | _value = dump_method['json'](task)
69 |
70 | return md5(_value).digest()
71 |
72 | def push_buffer_task(full_func_name, *args, **kw):
73 | queue_name, func_name = split_full_func_name(full_func_name)
74 | task = gen_task(func_name, *args, **kw)
75 | model.get_buffer_queue(queue_name).push(task)
76 |
77 | def push_task(full_func_name, *args, **kw):
78 | """
79 | callback: 这是另外一个注册的task,在func调用完毕后,会启动这个
80 |
81 | 加入队列::
82 |
83 |         push_task(u'foo:echo', aaa, bb, foo='bar',
84 |             ztq_callback='foo:callback', ztq_callback_args=(12,32,3), ztq_callback_kw={})
85 | """
86 | system = kw.pop('ztq_system', 'default')
87 | queue_name, func_name = split_full_func_name(full_func_name)
88 | to_right = kw.pop('ztq_first', False)
89 | # 队列运行相关信息
90 | runtime = kw.pop('runtime', \
91 | {'create':int(time.time()), 'queue':queue_name})
92 |
93 | task = gen_task(func_name, *args, **kw)
94 | task_md5 = _get_task_md5(task)
95 |
96 | task_hash = model.get_task_hash(queue_name, system=system)
97 |
98 |     # 因为queue队列有worker不停在监视,必须先将hash的内容push,再将queue的内容push
99 | task['runtime'] = runtime
100 | if task_hash.__setitem__(task_md5, task) == 1:
101 | # 如果返回值等于0, 说明task_md5已经存在
102 | queue = model.get_task_queue(queue_name, system=system)
103 | queue.push(task_md5, to_left=not to_right)
104 |
105 | def push_runtime_task(queue_name, task):
106 | """ 直接将task push 到 redis """
107 | _push_runtime_job(queue_name, task, model.get_task_hash, model.get_task_queue)
108 |
109 | def push_runtime_error(queue_name, error):
110 | _push_runtime_job(queue_name, error, model.get_error_hash, model.get_error_queue)
111 |
112 | def _push_runtime_job(queue_name, task, get_hash, get_queue):
113 | to_left = task.get('kw', {}).pop('to_left', True)
114 | runtime = task.pop('runtime')
115 |
116 | task_md5 = _get_task_md5(task)
117 | task_hash = get_hash(queue_name)
118 |
119 |     # 因为queue队列有worker不停在监视,必须先将hash的内容push,再将queue的内容push
120 | task['runtime'] = runtime
121 | if task_hash.__setitem__(task_md5, task) == 1:
122 | # 如果返回值等于0, 说明task_md5已经存在
123 | queue = get_queue(queue_name)
124 | queue.push(task_md5, to_left=to_left)
125 |
126 | def pop_task(queue_name, task_md5=None, timeout=0, from_right=True):
127 | """ 取出,并删除 """
128 | return _pop_job(queue_name, task_md5,
129 | model.get_task_hash, model.get_task_queue, timeout, from_right)
130 |
131 | def pop_error(queue_name, task_md5=None, timeout=0, from_right=True):
132 | return _pop_job(queue_name, task_md5,
133 | model.get_error_hash, model.get_error_queue, timeout, from_right)
134 |
135 | def _pop_job(queue_name, task_md5, get_hash, get_queue, timeout=0, from_right=True):
136 |
137 | if not task_md5:
138 | task_md5 = get_queue(queue_name).pop(timeout=timeout, \
139 | from_right=from_right)
140 | else:
141 | get_queue(queue_name).remove(task_md5)
142 |
143 | if not task_md5: return None # 可能超时了
144 |
145 | task_hash = get_hash(queue_name)
146 | return task_hash.pop(task_md5)
147 |
148 | class JobThread(Thread):
149 | def __init__(self,queue_name):
150 | super(JobThread,self).__init__()
151 | self.queue_name = queue_name
152 |
153 | def run(self):
154 | """ 阻塞方式找到任务,并自动调用"""
155 | queue = model.get_task_queue(self.queue_name)
156 | while True:
157 | task = queue.pop()
158 | try:
159 | task_registry[task['func']](*task['args'], **task['kw'])
160 | if task['callback']:
161 | callback_args = task.get('callback_args', ())
162 | callback_kw = task.get('callback_kw', {})
163 | push_task(task['callback'], *callback_args, **callback_kw)
164 | except Exception, e:
165 | print str(e)
166 |
167 | def has_task(queue_name, task, to_front=False):
168 |     """ 检查某个任务(task)当前是否存在
169 |     正在 worker 中执行返回 'running';在队列中排队返回 'queue';都不是则返回 'none'
170 |     to_front 为真时,若任务在队列中排队,会被调整到队头
171 |     """
172 | runtime = task.pop('runtime', None)
173 | task_md5 = _get_task_md5(task)
174 | if not runtime is None: task['runtime'] = runtime
175 |
176 | # 检查work现在的工作
177 | worker_list = model.get_all_worker()
178 | for worker_name in worker_list:
179 | worker_job = model.get_job_state(worker_name)
180 | if not worker_job: continue
181 | for thread_name, job in worker_job.items():
182 | job.pop('runtime', '')
183 | job.pop('process', '')
184 | if _get_task_md5(job) == task_md5:
185 | return 'running'
186 |
187 | # 检查所在队列
188 | queue_name = queue_name
189 | task_hash = model.get_task_hash(queue_name)
190 | if task_md5 in task_hash:
191 | if to_front: # 调整顺序
192 | task_queue = model.get_task_queue(queue_name)
193 | task_queue.remove(task_md5)
194 | task_queue.push(task_md5, to_left=False)
195 | return 'queue'
196 |
197 | return 'none'
198 |
199 |
--------------------------------------------------------------------------------
/ztq_worker/ztq_worker/job_thread.py:
--------------------------------------------------------------------------------
1 | # -*- encoding: utf-8 -*-
2 | import threading
3 | import time, sys
4 | import traceback
5 | import logging
6 |
7 | from config_manager import CONFIG
8 | import ztq_core
9 |
10 | thread_context = threading.local()
11 | logger = logging.getLogger("ztq_worker")
12 | QUEUE_TIMEOUT = 30
13 |
14 | def report_job(pid=-1, comment='', **kw):
15 | """ 报告当前转换进程信息 """
16 | if not hasattr(thread_context, 'job'):
17 | return # 如果不在线程中,不用报告了
18 |
19 | job = thread_context.job
20 |
21 | # 报告转换状态
22 | job['process'].update({'pid': pid,
23 | 'start':int(time.time()),
24 | 'comment':comment})
25 | if kw:
26 | job['process'].update(kw)
27 |
28 | # 写入状态
29 | job_state = ztq_core.get_job_state(job['runtime']['worker'])
30 | job_state[job['runtime']['thread']] = job
31 |
32 | def report_progress(**kw):
33 | """ 报告当前转换进程信息 """
34 | if not hasattr(thread_context, 'job'):
35 | return # 如果不在线程中,不用报告了
36 |
37 | job = thread_context.job
38 |
39 |     if not job.get('pcallback'): return  # 任务没有配置进度回调
40 | # 报告转换进度
41 | progress_func = ztq_core.task_registry[job['pcallback']]
42 | progress_args = job.get('pcallback_args', [])
43 | progress_kw = job.get('pcallback_kw', {})
44 | progress_kw.update(kw)
45 | progress_func(*progress_args, **progress_kw)
46 |
47 | class JobThread(threading.Thread):
48 | """ 监视一个原子队列,调用转换引擎取转换
49 | 转换结果记录转换队列,转换出错需要记录出错日志与错误队列
50 | """
51 | def __init__(self, queue_name, sleep_time, from_right=True):
52 | super(JobThread, self).__init__()
53 | self.queue_name = queue_name
54 | self.sleep_time = sleep_time
55 | self.from_right = from_right # 读取服务器队列的方向,从左边还是右边
56 | # _stop 为 True 就会停止这个线程
57 | self._stop = False
58 | self.start_job_time = 0 # 记录任务开始时间
59 |
60 | def run(self):
61 | """ 阻塞方式找到任务,并自动调用"""
62 | # 如果上次有任务在运行还没结束,重新执行
63 | jobs = ztq_core.get_job_state(CONFIG['server']['alias'])
64 | if self.name in jobs:
65 | self.start_job(jobs[self.name])
66 |
67 | # 队列批处理模式
68 | # batch_size: 批处理的阀值,达到这个阀值,就执行一次batch_func
69 | # batch_func:
70 |         # 1, 执行一批batch_size 大小的任务后,后续自动执行这个方法
71 | # 2, 执行一批小于batch_size 大小的任务后,再得不到任务,后续自动执行这个方法
72 | batch_config = CONFIG.get("batch_queue", {}).get(self.queue_name, {})
73 | batch_size = batch_config.get('batch_size', None) or -1
74 | batch_func = batch_config.get('batch_func', None) or (lambda *args, **kw: -1)
75 |
76 | run_job_index = 0
77 | queue_tiemout = QUEUE_TIMEOUT
78 | # 循环执行任务
79 | while not self._stop:
80 | try:
81 | task = ztq_core.pop_task(
82 | self.queue_name,
83 | timeout=queue_tiemout,
84 | from_right=self.from_right
85 | )
86 | except ztq_core.ConnectionError, e:
87 | logger.error('ERROR: redis connection error: %s' % str(e))
88 | time.sleep(3)
89 | continue
90 | except ztq_core.ResponseError, e:
91 | logger.error('ERROR: redis response error: %s' % str(e))
92 | time.sleep(3)
93 | continue
94 | except Exception, e:
95 | logger.error('ERROR: redis unknown error: %s' % str(e))
96 | time.sleep(3)
97 | continue
98 |
99 | if task is None:
100 | # 没有后续任务了。执行batch_func
101 | if run_job_index > 0:
102 | run_job_index = 0
103 | queue_tiemout = QUEUE_TIMEOUT
104 | try:
105 | batch_func()
106 | except Exception, e:
107 | logger.error('ERROR: batch execution error: %s' % str(e))
108 | continue
109 |
110 | try:
111 | self.start_job(task)
112 | except Exception, e:
113 | logger.error('ERROR: job start error: %s' % str(e))
114 |
115 | if batch_size > 0:
116 | if run_job_index >= batch_size - 1:
117 | # 完成了一批任务。执行batch_func
118 | run_job_index = 0
119 | queue_tiemout = QUEUE_TIMEOUT
120 | try:
121 | batch_func()
122 | except Exception, e:
123 | logger.error('ERROR: batch execution error: %s' % str(e))
124 | else:
125 | run_job_index += 1
126 | queue_tiemout = -1
127 |
128 | if self.sleep_time:
129 | time.sleep(self.sleep_time)
130 |
131 | def start_job(self, task):
132 | self.start_job_time = int(time.time())
133 | task['runtime'].update({'worker': CONFIG['server']['alias'],
134 | 'thread': self.getName(),
135 | 'start': self.start_job_time, })
136 | # 记录当前在做什么
137 | task['process'] = {'ident':self.ident}
138 | thread_context.job = task
139 | try:
140 | # started report
141 | report_job(comment='start the job')
142 | self.run_task = ztq_core.task_registry[task['func']]
143 | self.run_task(*task['args'], **task['kw'])
144 |
145 | task['runtime']['return'] = 0
146 | task['runtime']['reason'] = 'success'
147 |
148 | if task.get('callback', None):
149 | callback_args = task.get('callback_args', ())
150 | callback_kw = task.get('callback_kw', {})
151 | ztq_core.push_task(task['callback'], *callback_args, **callback_kw)
152 |
153 | except Exception, e:
154 | reason = traceback.format_exception(*sys.exc_info())
155 | # 将错误信息记录到服务器
156 | try:
157 | return_code = str(e.args[0]) if len(e.args) > 1 else 300
158 | except:
159 | return_code = 300
160 | task['runtime']['return'] = return_code
161 | task['runtime']['reason'] = reason[-11:]
162 | task['runtime']['end'] = int( time.time() )
163 | ztq_core.push_runtime_error(self.queue_name, task)
164 | # 错误回调
165 | if task.get('fcallback', None):
166 | callback_args = task.get('fcallback_args', ())
167 | callback_kw = task.get('fcallback_kw', {})
168 | callback_kw['return_code'] = return_code
169 | callback_kw['return_msg'] = unicode(reason[-1], 'utf-8', 'ignore')
170 | ztq_core.push_task(task['fcallback'], *callback_args, **callback_kw)
171 | # 在终端打印错误信息
172 | #reason.insert(0, str(datetime.datetime.today()) + '\n')
173 | logger.error(''.join(reason))
174 |
175 | # 任务结束,记录日志
176 | task['runtime']['end'] = int( time.time() )
177 | ztq_core.get_work_log_queue().push(task)
178 | # 删除服务器的转换进程状态信息
179 | job_state = ztq_core.get_job_state(task['runtime']['worker'])
180 | try:
181 | del job_state[task['runtime']['thread']]
182 | except KeyError:
183 | pass # maybe killed
184 | self.start_job_time = 0
185 |
186 | def stop(self):
187 | """ 结束这个进程,会等待当前转换完成
188 | 请通过JobThreadManager 来完成工作线程的退出,不要直接使用这个方法
189 | """
190 | self._stop = True
191 |
192 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/utils/get_fts_data.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 | '''
3 | 功能描述: 此模块用于数据整合, 被views模块调用.
4 |
5 | Created on 2011-4-28
6 |
7 | @author: Zay
8 | '''
9 | import time, pprint, datetime
10 | import ztq_core
11 | import urllib
12 | try:
13 | import json
14 | except: import simplejson as json
15 |
16 | def get_sys_log(sindex=None, eindex=None):
17 | log_queue = ztq_core.get_system_log_queue()
18 | for log in log_queue[sindex:eindex]:
19 | log['_alias'] = log.get('alias', '')
20 | log['_host'] = log.get('host', '')
21 | log['_type'] = log.get('type', '')
22 | log['_timestamp'] = datetime.datetime.fromtimestamp(log.get('timestamp', 0))
23 | yield log
24 |
25 | def get_worker_log(sindex=None, eindex=None):
26 | worker_log_queue = ztq_core.get_work_log_queue()
27 | for worker_log in worker_log_queue[sindex:eindex]:
28 | # 检查worker是否还存在
29 | log = {}
30 | log['_server'] = worker_log['runtime']['worker']
31 | log['_created'] = datetime.datetime.fromtimestamp(worker_log['runtime'].get('create', 0))
32 | log['_start'] = datetime.datetime.fromtimestamp(worker_log['runtime'].get('start', 0))
33 | log['_end'] = datetime.datetime.fromtimestamp(worker_log['runtime'].get('end', 0))
34 | log['_status'] = worker_log['runtime']['return']
35 | log['_func'] = worker_log['func']
36 | log['_comment'] = worker_log['process'].get('comment','')
37 | log['_file'] = worker_log['kw'].get('comment', worker_log['kw'].get('path', ''))
38 | log['_reason'] = ''.join(worker_log['runtime']['reason'])
39 | log['_detail'] = pprint.pformat(worker_log)
40 | yield log
41 |
42 | def get_taskqueues_list():
43 | # 队列情况列表
44 | queues_list = ztq_core.get_queue_config()
45 |
46 | # 排序
47 | sort_queue_name = {}
48 | for queue_name, queue_config in queues_list.items():
49 | sort_queue_name[queue_name] = len(ztq_core.get_error_queue(queue_name))
50 |
51 | for queue_name in sorted(sort_queue_name,
52 | key=lambda x: sort_queue_name[x],
53 | reverse=True):
54 | task_queue = {}
55 | task_queue['name'] = queue_name
56 | #task_queue['tags'] = queue_config.get('tags',())
57 | queue = ztq_core.get_task_queue(queue_name)
58 | # 任务数/错误数
59 | task_queue['length'] = len(queue)
60 | task_queue['error_length'] = sort_queue_name[queue_name]
61 |
62 | #任务首个时间
63 | task_queue['error_end'] = task_queue['first'] = ''
64 | first_job = queue[0]
65 | first_job= ztq_core.get_task_hash(queue_name).get(first_job)
66 | if first_job:
67 | task_queue['first'] = datetime.datetime.fromtimestamp(first_job['runtime'].get('create', 0))
68 |
69 | #错误最末一个的时间
70 | error_first_job = ztq_core.get_error_queue(queue_name)[0]
71 | error_first_job = ztq_core.get_error_hash(queue_name).get(error_first_job)
72 | if error_first_job:
73 | task_queue['error_end'] = datetime.datetime.fromtimestamp(error_first_job['runtime'].get('create', 0))
74 |
75 | # 获取worker工作线程配置
76 | workers_config = ztq_core.get_worker_config()
77 | task_queue['from_right'] = True
78 | for worker_name,worker_config in workers_config.items():
79 | task_queue['workers'] = []
80 | for config in worker_config.get(queue_name,[]):
81 | task_queue['workers'].append([worker_name+':', config['interval']])
82 | if 'from_right' in config:
83 | task_queue['from_right'] = config['from_right']
84 | task_queue['buffer_length'] = len(ztq_core.get_buffer_queue(queue_name))
85 | yield task_queue
86 |
87 | def get_queues_jobs(queue_name):
88 | queue = ztq_core.get_task_queue(queue_name)
89 | for task_job_hash in queue.reverse():
90 | task_job = ztq_core.get_task_hash(queue_name).get(task_job_hash)
91 | tmp_job={}
92 | tmp_job['_queue_name'] = queue_name
93 | tmp_job['_id'] = urllib.quote(task_job_hash)
94 | #tmp_job['_ori'] = task_job
95 | tmp_job['_detail'] = pprint.pformat(task_job)
96 | tmp_job['_created'] = datetime.datetime.fromtimestamp(task_job['runtime'].get('create', 0))
97 | yield tmp_job
98 |
99 | def get_all_error_jobs(sindex=0, eindex=-1):
100 | queues_list = ztq_core.get_queue_config()
101 | index = 0
102 | count = eindex - sindex
103 | for queue_name in queues_list.keys():
104 | error_len = len(ztq_core.get_error_queue(queue_name))
105 | if error_len == 0: continue
106 | # 确定从哪里开始
107 | index += error_len
108 | if index < sindex: continue
109 |
110 | start_index = 0 if sindex-(index-error_len) < 0 else sindex-(index-error_len)
111 | yield get_error_queue_jobs(queue_name, start_index, count+start_index)
112 |
113 | # 是否应该结束
114 | count -= error_len - start_index
115 | if count < 0: break
116 |
117 | def get_error_queue(error_queue_name, sindex=0, eindex=-1):
118 | """ 模板问题的原因 """
119 | yield get_error_queue_jobs(error_queue_name, sindex, eindex)
120 |
121 | def get_error_queue_jobs(error_queue_name, sindex=0, eindex=-1):
122 | error_queue = ztq_core.get_error_queue(error_queue_name)
123 | workers_state = ztq_core.get_worker_state()
124 | for hash_key in error_queue[sindex:eindex]:
125 | error_job = ztq_core.get_error_hash(error_queue_name)[hash_key]
126 | tmp_job={}
127 | tmp_job['json'] = json.dumps(error_job)
128 | tmp_job['_queue_name'] = error_queue_name
129 | worker_name = error_job['runtime']['worker']
130 | # 检查worker是否存在,存在则取得服务器ip
131 | if worker_name in workers_state:
132 | tmp_job['_server'] = workers_state[worker_name]['ip']
133 | else: tmp_job['_server'] = worker_name
134 | tmp_job['_created'] = datetime.datetime.fromtimestamp(error_job['runtime'].get('create',0))
135 | tmp_job['_start'] = datetime.datetime.fromtimestamp(error_job['runtime'].get('start',0))
136 | tmp_job['_end'] = datetime.datetime.fromtimestamp(error_job['runtime'].get('end',0))
137 | tmp_job['_reason'] = ''.join(error_job['runtime']['reason'])
138 | tmp_job['_file'] = error_job['kw'].get('comment', error_job['kw'].get('path', ''))
139 | tmp_job['_error_mime'] = error_job['process'].get('to_mime','')
140 | tmp_job['_detail'] = pprint.pformat(error_job)
141 | tmp_job['hash_id'] = urllib.quote(hash_key)
142 | yield tmp_job
143 |
144 | def get_worker_list():
145 | workers_dict = ztq_core.get_worker_state().items()
146 | for worker_name, worker_status in workers_dict:
147 | worker_status['_worker_name'] = worker_name
148 | worker_status['_started'] = \
149 | datetime.datetime.fromtimestamp(worker_status['started'])
150 | worker_status['_timestamp'] = \
151 | datetime.datetime.fromtimestamp(worker_status['timestamp'])
152 |
153 | # 检查worker是否在工作
154 | cmd_queue = ztq_core.get_command_queue(worker_name)
155 |
156 | # 如果指令队列不为空的话,意味着worker没工作,属于下线状态
157 | if cmd_queue:
158 | worker_status['_active'] = u'shutdown'
159 | else:
160 | worker_status['_active'] = u'work'
161 |
162 | # 获取worker开了多少个线程
163 | worker_job = ztq_core.get_job_state(worker_name)
164 | worker_status['_threads'] = []
165 | for thread_name,thread_status in worker_job.items():
166 | thread_status['_detail'] = pprint.pformat(thread_status)
167 | thread_status['_name'] = thread_name
168 | thread_status['_comment'] = thread_status['kw'].get('comment',thread_status['process'].get('comment', ''))
169 | thread_status['_pid'] = thread_status['process'].get('pid', -1)
170 | ident = unicode(thread_status['process'].get('ident', -1))
171 | if ident in worker_status['traceback']:
172 | thread_status['_thread_detail'] = pprint.pformat(worker_status['traceback'][ident])
173 | # 任务进行了多少时间
174 | used_time = int(time.time())-thread_status['process']['start']
175 | if used_time > 3600:
176 | used_time = u'%.2f小时' % (used_time / 3600.0)
177 | elif used_time > 60:
178 | used_time = u'%.2f分钟' % (used_time / 60.0)
179 | thread_status['_take_time'] = used_time
180 |
181 | worker_status['_threads'].append(thread_status)
182 |
183 | yield worker_status
184 |
185 | def send_command(worker_name, command_stm):
186 | """向worker发报告状态指令
187 | """
188 | send_command= {
189 | 'command':command_stm,
190 | 'timestamp':int(time.time())
191 | }
192 | cmd_queue = ztq_core.get_command_queue(worker_name)
193 |
194 | # 避免同时发送多条同步命令
195 | if cmd_queue:
196 | for command in cmd_queue:
197 | if command.get('command', None) == send_command['command']:
198 | return 0
199 | cmd_queue.push(send_command)
200 |
--------------------------------------------------------------------------------
/ztq_core/ztq_core/redis_wrap.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 |
3 | import redis
4 | import pickle
5 | try:
6 | import json
7 | except :
8 | import simplejson as json
9 |
10 | import UserDict, UserList
11 |
12 | ConnectionError = redis.exceptions.ConnectionError
13 | ResponseError = redis.exceptions.ResponseError
14 |
15 | DEFAULT_ENCODING = 'UTF-8' # sys.getdefaultencoding()
16 | #--- System related ----------------------------------------------
17 | SYSTEMS = {
18 | 'default': redis.Redis(host='localhost', port=6379)
19 | }
20 |
21 | RECOVER_FUNCS = {'default':None}
22 |
23 | def setup_redis(name, host, port, db=0, recover_func=None, **kw):
24 | SYSTEMS[name] = redis.Redis(host=host, port=port, db=db, **kw)
25 | RECOVER_FUNCS[name] = recover_func
26 |
27 | def get_redis(system='default'):
28 | return SYSTEMS[system]
29 |
30 | def ha_redis(func):
31 | """ 让redis访问高可用 """
32 | def new_func(self, *args, **kw):
33 | try:
34 | return func(self, *args, **kw)
35 | except ConnectionError, e:
36 | recover_func = RECOVER_FUNCS[self.system]
37 | if recover_func is not None:
38 | recover_func(self.system)
39 | return func(self, *args, **kw)
40 | else:
41 | raise
42 | return new_func
43 |
44 | #--- Decorators ----------------------------------------------
45 | def get_list(name, system='default',serialized_type='json'):
46 | return ListFu(name, system, serialized_type=serialized_type)
47 |
48 | def get_queue(name, system='default',serialized_type='json'):
49 | return QueueFu(name, system, serialized_type=serialized_type)
50 |
51 | def get_limit_queue(name, length, system='default',serialized_type='json'):
52 | return LimitQueueFu(name, length, system, serialized_type=serialized_type)
53 |
54 | def get_hash(name, system='default',serialized_type='json'):
55 | return HashFu(name, system, serialized_type=serialized_type)
56 |
57 | def get_set(name, system='default',serialized_type='json'):
58 | return SetFu(name, system, serialized_type=serialized_type)
59 |
60 | def get_dict(name, system='default',serialized_type='json'):
61 | return DictFu(name, system, serialized_type=serialized_type)
62 |
63 | def get_key(name, system='default',serialized_type='json'):
64 | loads = load_method[serialized_type]
65 | value = get_redis(system).get(name)
66 | try:
67 | return loads(value)
68 | except:return value
69 |
70 | def del_key(name, system='default'):
71 | get_redis(system).delete(name)
72 |
73 | def get_keys(name, system='default'):
74 | for key in get_redis(system).keys(name + "*"):
75 | key_name = key[len(name):]
76 | yield key_name
77 |
78 | def set_key(name, value, system='default',serialized_type='json'):
79 | dumps = dump_method[serialized_type]
80 | value = dumps(value)
81 | get_redis(system).set(name, value)
82 |
83 | #---serialize data type----------------------------------------
84 | def _convert_persistent_obj(obj):
85 | # fix json.dumps raise TypeError
86 | # 是persistent 对象
87 | if isinstance(obj, (UserDict.UserDict, dict)):
88 | return dict(obj)
89 | elif isinstance(obj, (UserList.UserList, list, set)):
90 | return list(obj)
91 | raise TypeError, '%s: %s is not JSON serializable'%(type(obj), repr(obj))
92 |
93 | dump_method = {'json':lambda item : json.dumps(item, sort_keys=True, \
94 | encoding=DEFAULT_ENCODING, default=_convert_persistent_obj),
95 | 'pickle':pickle.dumps,
96 | 'string':str
97 | }
98 | load_method = {'json':json.loads,
99 | 'pickle':pickle.loads,
100 | 'string':str
101 | }
102 |
103 | #--- Data impl. ----------------------------------------------
104 | class ListFu(object):
105 |
106 | def __init__(self, name, system, serialized_type='json'):
107 | self.name = name
108 | self.system = system
109 | self.type = serialized_type
110 | self.dumps = dump_method[serialized_type]
111 | self.loads = load_method[serialized_type]
112 |
113 | @ha_redis
114 | def append(self, item):
115 | item = self.dumps(item)
116 | get_redis(self.system).lpush(self.name, item)
117 |
118 | @ha_redis
119 | def extend(self, iterable):
120 | for item in iterable:
121 | self.append(item)
122 |
123 | @ha_redis
124 | def remove(self, value):
125 | value = self.dumps(value)
126 | get_redis(self.system).lrem(self.name, value)
127 |
128 | @ha_redis
129 | def pop(self, index=None):
130 | if index:
131 | raise ValueError('Not supported')
132 | serialized_data = get_redis(self.system).rpop(self.name)
133 |         # rpop 直接返回序列化后的值,而不是 (key, value) 元组
134 |         if serialized_data:
135 |             return self.loads(serialized_data)
136 |         else: return None
137 |
138 | @ha_redis
139 | def __len__(self):
140 | return get_redis(self.system).llen(self.name)
141 |
142 | @ha_redis
143 | def __iter__(self):
144 | client = get_redis(self.system)
145 | i = 0
146 | while True:
147 | items = client.lrange(self.name, i, i+30)
148 | if len(items) == 0:
149 | break
150 | #raise StopIteration
151 | for item in items:
152 | yield self.loads(item)
153 | i += 30
154 |
155 | @ha_redis
156 | def __getitem__(self, index):
157 | client = get_redis(self.system)
158 | value = client.lindex(self.name, index)
159 | return self.loads(value) if value else None
160 |
161 | @ha_redis
162 | def __getslice__(self, i, j):
163 | client = get_redis(self.system)
164 | items = client.lrange(self.name, i, j)
165 | for item in items:
166 | yield self.loads(item)
167 |
168 | class HashFu:
169 |
170 | def __init__(self, name, system, serialized_type='json'):
171 | self.name = name
172 | self.system = system
173 | self.dumps = dump_method[serialized_type]
174 | self.loads = load_method[serialized_type]
175 |
176 | @ha_redis
177 | def get(self, key, default=None):
178 | value = get_redis(self.system).hget(self.name, key)
179 | try:
180 | return self.loads(value)
181 | except: return default
182 |
183 | @ha_redis
184 | def items(self):
185 | for key in self.keys():
186 | # key_list 不是实时的数据
187 | # 这个任务可能已经被取走了(当监视这个队列的工作线程有多个的时候)
188 | value = self.get(key)
189 | if value is None: continue
190 |
191 | yield key, value
192 |
193 | @ha_redis
194 | def keys(self):
195 | return get_redis(self.system).hkeys(self.name) or []
196 |
197 | @ha_redis
198 | def values(self):
199 |         # hvals 返回的是序列化字符串的列表,需要逐个反序列化
200 |         return [self.loads(v) for v in (get_redis(self.system).hvals(self.name) or [])]
201 |
202 | @ha_redis
203 | def pop(self, key):
204 | pline = get_redis(self.system).pipeline()
205 | pline.hget(self.name, key).hdel(self.name, key)
206 | _value, _expire = pline.execute()
207 | if _expire:
208 | return self.loads(_value)
209 | else:
210 | #raise KeyError,'redis hasher not match the %s key\n\n'%key
211 | print 'redis hasher not match the %s key\n\n'%key
212 | return None
213 |
214 | @ha_redis
215 | def __len__(self):
216 | return get_redis(self.system).hlen(self.name) or 0
217 |
218 | @ha_redis
219 | def __getitem__(self, key):
220 | val = self.get(key)
221 | if val is None:
222 | raise KeyError
223 | return val
224 |
225 | @ha_redis
226 | def __setitem__(self, key, value):
227 | value = self.dumps(value)
228 | return get_redis(self.system).hset(self.name, key, value)
229 |
230 | @ha_redis
231 | def __delitem__(self, key):
232 | get_redis(self.system).hdel(self.name, key)
233 |
234 | @ha_redis
235 | def __contains__(self, key):
236 | return get_redis(self.system).hexists(self.name, key)
237 |
238 | @ha_redis
239 | def update(self, new_dict, **kw):
240 | update = {}
241 |
242 | if new_dict and hasattr(new_dict, 'keys'):
243 | for key in new_dict:
244 | update[key] = self.dumps(new_dict[key])
245 | elif new_dict:
246 | for key, value in new_dict:
247 |                 update[key] = self.dumps(value)
248 |
249 | for key in kw:
250 |             update[key] = self.dumps(kw[key])
251 |
252 | if update:
253 | get_redis(self.system).hmset(self.name, update)
254 |
255 | class SetFu:
256 |
257 | def __init__(self, name, system, serialized_type='json'):
258 | self.name = name
259 | self.system = system
260 | self.dumps = dump_method[serialized_type]
261 | self.loads = load_method[serialized_type]
262 |
263 | @ha_redis
264 | def add(self, item):
265 | item = self.dumps(item)
266 | get_redis(self.system).sadd(self.name, item)
267 |
268 | @ha_redis
269 | def remove(self, item):
270 | item = self.dumps(item)
271 | get_redis(self.system).srem(self.name, item)
272 |
273 | @ha_redis
274 |     def pop(self):
275 |         # 随机弹出并返回集合中的一个成员(spop 不接受指定成员)
276 |         value = get_redis(self.system).spop(self.name)
277 |         return self.loads(value) if value else None
278 |
279 | @ha_redis
280 | def __iter__(self):
281 | client = get_redis(self.system)
282 | for item in client.smembers(self.name):
283 | yield self.loads(item)
284 |
285 | @ha_redis
286 | def __len__(self):
287 | return len(get_redis(self.system).smembers(self.name))
288 |
289 | @ha_redis
290 | def __contains__(self, item):
291 | item = self.dumps(item)
292 | return get_redis(self.system).sismember(self.name, item)
293 |
294 | class DictFu:
295 |
296 | def __init__(self, name, system, serialized_type='json'):
297 | self.name = name
298 | self.system = system
299 | self.dumps = dump_method[serialized_type]
300 | self.loads = load_method[serialized_type]
301 |
302 | @ha_redis
303 | def get(self, key, default=None):
304 | value = get_redis(self.system).get(self.name+key)
305 | try:
306 | return self.loads(value)
307 | except: return default
308 |
309 | @ha_redis
310 | def set(self, key, value):
311 | value = self.dumps(value)
312 | get_redis(self.system).set(self.name+key, value)
313 |
314 | @ha_redis
315 | def __delitem__(self, key):
316 | get_redis(self.system).delete(self.name+key)
317 |
318 | @ha_redis
319 | def __len__(self):
320 | listkey = get_redis(self.system).keys(self.name+"*")
321 | return len(listkey) or 0
322 |
323 | @ha_redis
324 | def keys(self):
325 | prefix_len = len(self.name)
326 | return [key[prefix_len:] for key in get_redis(self.system).keys(self.name + "*")]
327 |
328 | @ha_redis
329 | def items(self):
330 | # XXX self.get 每次都要连结redis, 这样不好
331 | key_list = get_redis(self.system).keys(self.name+"*")
332 | for key in key_list:
333 | key_name = key[len(self.name):]
334 |
335 | # key_list 不是实时的数据
336 | # 这个任务可能已经被取走了(当监视这个队列的工作线程有多个的时候)
337 | value = self.get(key_name)
338 | if value is None: continue
339 |
340 | yield key_name, value
341 |
342 | @ha_redis
343 | def __getitem__(self, key=''):
344 | val = self.get(key, None)
345 | if val is None:
346 | raise KeyError
347 | return val
348 |
349 | @ha_redis
350 | def __setitem__(self, key, value):
351 | self.set(key, value)
352 |
353 | @ha_redis
354 | def __contains__(self, key):
355 | return get_redis(self.system).exists(self.name+key)
356 |
357 | class QueueFu(ListFu):
358 |
359 | def __init__(self, name, system, serialized_type='json'):
360 | super(QueueFu,self).__init__(name, system, serialized_type=serialized_type)
361 |
362 | @ha_redis
363 | def push(self, item, to_left=True):
364 | if to_left:
365 | self.append(item)
366 | else:
367 | item = self.dumps(item)
368 | get_redis(self.system).rpush(self.name, item)
369 |
370 | @ha_redis
371 | def pop(self, timeout=0, from_right = True):
372 | """
373 | 得到redis list 对象中的一个item,并把item 从 redis list 对象中删除
374 | from_right: 如果值为真,从redis list 对象右边读取,反之,从左边读取
375 | timeout: timeout 等于大于0,以阻塞式获取。timeout 小于0,直接获取返回
376 | """
377 | if from_right:
378 | if timeout >= 0:
379 | serialized_data = get_redis(self.system).brpop(self.name, timeout)
380 | else:
381 | serialized_data = get_redis(self.system).rpop(self.name)
382 | else:
383 | if timeout >= 0:
384 | serialized_data = get_redis(self.system).blpop(self.name, timeout)
385 | else:
386 | serialized_data = get_redis(self.system).lpop(self.name)
387 |
388 | if serialized_data:
389 | # 阻塞式获取,返回self.name, result
390 | if isinstance(serialized_data, (tuple, list, set)) and \
391 | len(serialized_data) == 2:
392 | return self.loads(serialized_data[1]) if serialized_data[1] else None
393 | # 直接获取,返回 result
394 | else:
395 | return self.loads(serialized_data)
396 |
397 | return None
398 |
399 | @ha_redis
400 | def reverse(self):
401 | """倒序输出结果
402 | """
403 | client = get_redis(self.system)
404 | length = client.llen(self.name)
405 | for index in xrange(length-1, -1, -1):
406 | item = client.lindex(self.name, index)
407 | yield self.loads(item)
408 |
409 | class LimitQueueFu(QueueFu):
410 | """此队列类用于控制队列长度,主要用于日志
411 | """
412 | def __init__(self, name, length, system, serialized_type='json'):
413 | super(LimitQueueFu,self).__init__(name, system, serialized_type=serialized_type)
414 | self.length = length - 1
415 |
416 | @ha_redis
417 | def push(self, item):
418 | #QueueFu.push(self, item)
419 | #get_redis(self.system).ltrim(self.name, 0, self.length)
420 |
421 | item = self.dumps(item)
422 | pline = get_redis(self.system).pipeline()
423 | pline.lpush(self.name, item).ltrim(self.name, 0, self.length)
424 | pline.execute()
425 |
426 |
--------------------------------------------------------------------------------
/ztq_console/ztq_console/views.py:
--------------------------------------------------------------------------------
1 | #coding:utf-8
2 | from pyramid.response import Response
3 | from pyramid.httpexceptions import HTTPFound
4 | from pyramid.view import view_config, forbidden_view_config
5 | from pyramid.security import remember, forget
6 | from pyramid.events import subscriber
7 | from pyramid.interfaces import IBeforeRender
8 | from pyramid.url import static_url, resource_url, route_url
9 | from pyramid.threadlocal import get_current_request
10 | import time
11 | import ztq_core
12 | import utils
13 | import urllib
14 | from utils.security import USERS
15 |
16 | current_redis = None
17 | MENU_CONFIG = {'title':u'ZTQ队列监控后台',
18 | 'servers':[
19 | #{'name':'oc', 'host':'192.168.1.115', 'port':60207, 'db':1, 'title':'OC'},
20 | #{'name':'wo', 'host':'192.168.1.115', 'port':60206, 'db':1, 'title':'WO'},
21 | #{'name':'viewer', 'host':'192.168.1.115', 'port':60208, 'db':0, 'title':'Viewer'},
22 | ],
23 | 'current_redis':'oc',
24 | 'links':[('/workerstatus', u'工作状态'),
25 | ('/taskqueues',u'工作队列'),
26 | ('/errorlog',u'错误清单'),
27 | ('/workerlog', u'工作历史'),
28 | ('/syslog', u'系统日志'),
29 | ('/password', u'修改密码'),
30 | ('/logout', u'退出登录'),
31 | ]
32 | }
33 |
34 | @view_config(renderer='mainpage.html', permission='view')
35 | def main_view(request):
36 | """后台管理首页
37 | """
38 | return MENU_CONFIG
39 |
40 | @view_config(name='top.html', renderer='top.html', permission='view')
41 | def top_view(request):
42 | """后台管理首页
43 | """
44 | return MENU_CONFIG
45 |
46 | @view_config(name='menu.html', renderer='menu.html', permission='view')
47 | def menu_view(request):
48 | """初始化菜单
49 | """
50 | return MENU_CONFIG
51 |
52 | @view_config(name='workerstatus', renderer='worker.html', permission='edit')
53 | def workers_view(request):
54 | """后台管理首页
55 | 传出参数:worker的相关信息,各个队列的工作情况
56 | """
57 |
58 | workers = utils.get_worker_list()
59 | crons = ztq_core.model.get_cron_set()
60 | return {'workers':workers, 'crons':crons}
61 |
62 | @view_config(name='syslog', permission='edit')
63 | @view_config(name='workerlog', permission='edit')
64 | @view_config(name='errorlog', permission='edit')
65 | def route_main(request):
66 | route_name = request.view_name
67 | return HTTPFound(location=request.route_url(route_name, page=1))
68 |
69 | #--------------日志信息--------------------------------
70 | @view_config(route_name='syslog', renderer='syslog.html', permission='edit')
71 | def sys_log_view(request):
72 | """查看系统日志情况
73 | """
74 | page = request.matchdict.get('page', 1)
75 | page = int(page) or 1
76 |
77 | return pageination(utils.get_sys_log, page, 'sys_log')
78 |
79 | #--------------转换历史--------------------------------
80 | @view_config(route_name='workerlog', renderer='workerlog.html', permission='edit')
81 | def worker_log_view(request):
82 | """查看转换日志
83 | """
84 | page = request.matchdict.get('page', 1)
85 | page = int(page) or 1
86 |
87 | return pageination(utils.get_worker_log, page, 'worker_log')
88 |
89 | #--------------切换Redis--------------------------------
90 | @view_config(name='switch_redis.html', permission='edit')
91 | def switch_redis(request):
92 | """ 切换redis
93 | """
94 | redis_key = request.params.get('redis_name', '')
95 | for server in MENU_CONFIG['servers']:
96 | if server['name'] == redis_key:
97 | ztq_core.setup_redis('default', host=server['host'], port=server['port'], db=server.get('db', 1))
98 | MENU_CONFIG['current_redis'] = redis_key
99 | break
100 |
101 | #route_name = request.view_name
102 | return HTTPFound(location="/")
103 |
104 |
105 | #--------------调度管理--------------------------------
106 | def config_worker(request):
107 | """对worker进行配置管理
108 | """
109 | url_action = request.params.get('action','')
110 |
111 | # 获取用户请求操作
112 | worker_id = request.matchdict['id']
113 | if url_action == 'delete':
114 | #删除还没启用的worker,删除操作不会导致调度配置更新
115 | workers_dict = ztq_core.get_worker_state()
116 | del workers_dict[worker_id]
117 | worker_job = ztq_core.get_job_state(worker_id)
118 | for job_name, job_status in worker_job.items():
119 | del worker_job[job_name]
120 | return HTTPFound(location = '/workerstatus')
121 | elif url_action == 'update':
122 | # 发报告指令到各命令队列让worker报告自身状态
123 | worker_list = ztq_core.get_all_worker()
124 | for worker_name in worker_list:
125 | if worker_name == worker_id:
126 | utils.send_command(worker_name, 'report')
127 | time.sleep(1)
128 | return HTTPFound(location = '/workerstatus')
129 | return HTTPFound(location = '/workerstatus')
130 |
131 | def stop_working_job(request):
132 | """停止正在进行中的转换的工作
133 | """
134 | # 获取url操作
135 | worker_id = request.matchdict['id']
136 | thread = request.matchdict['thread']
137 | thread_pid = request.matchdict['pid']
138 | # pid为-1则不能杀
139 | if thread_pid == '-1':
140 | jobs = ztq_core.get_job_state(worker_id)
141 | task = jobs[thread]
142 |         task['runtime']['reason'] = "manually stopped"
143 | task['runtime']['end'] = int( time.time() )
144 | ztq_core.push_runtime_error(task['runtime']['queue'], task)
145 | del jobs[thread]
146 | return HTTPFound(location = '/workerstatus')
147 |
148 | kill_command = {
149 | 'command':'kill',
150 | 'timestamp':int(time.time()),
151 | 'pid': thread_pid
152 | }
153 | cmd_queue = ztq_core.get_command_queue(worker_id)
154 |     # Avoid sending several kill commands for the same pid at once
155 | if cmd_queue:
156 | for command in cmd_queue:
157 | if command.get('pid', None) == kill_command['pid']:
158 | return HTTPFound(location = '/workerstatus')
159 | cmd_queue.push(kill_command)
160 |
161 | return HTTPFound(location = '/workerstatus')
162 |
163 |
164 | #-------------- Queue details -------------------------------
165 | @view_config(name='taskqueues', renderer='queues.html', permission='edit')
166 | def task_queues(request):
167 |     """View the running state of the conversion queues.
168 |     Returns: the running conversions of every atomic queue.
169 | """
170 | task_job_length = 0
171 | error_job_length = 0
172 |     # Compute the total length of the atomic (task) queues and the error queues
173 | queues_list = ztq_core.get_queue_config()
174 | for queue_name, queue_config in queues_list.items():
175 | task_job_length += len(ztq_core.get_task_queue(queue_name))
176 | error_job_length += len(ztq_core.get_error_queue(queue_name))
177 | task_queues = utils.get_taskqueues_list()
178 |
179 | return {'task_queues':task_queues,
180 | 'task_job_length':task_job_length,
181 | 'error_job_length':error_job_length, }
182 |
183 | @view_config(route_name='taskqueue',renderer='jobs.html', permission='edit')
184 | def taskqueue(request):
185 |     """View the details and running state of a single queue.
186 | """
187 | queue_id = request.matchdict['id']
188 | jobs = utils.get_queues_jobs(queue_id)
189 | return {'jobs':jobs, 'queue_name':queue_id}
190 |
191 | def config_queue(request):
192 |     """Manage the number of worker threads for a queue.
193 |     Request form: http://server/taskqueues/q01/config?action=queue_down
194 | """
195 | queue_id = request.matchdict['id']
196 | url_action = request.params.get('action','')
197 |
198 |     # Adjust this queue's thread count on every worker
199 | for worker_name in ztq_core.get_worker_config().keys():
200 | utils.update_queue_threads(worker_name, queue_id, action=url_action)
201 | return HTTPFound(location = '/taskqueues')
202 |
203 | @view_config(route_name='taskqueue_action', permission='edit')
204 | def task_jobs_handler(request):
205 |     """Move a job to the head or tail of its queue, or delete it.
206 |     Request form: http://server/taskqueues/q01/job?action=high_priority&hash_id={{job_hash_id}}
207 | """
208 | valid_action = ('high_priority','low_priority', 'delete')
209 | queue_name = request.matchdict['id']
210 | url_action = request.params.get('action','')
211 | job_hash_id = urllib.unquote(request.params.get('hash_id').encode('utf8'))
212 | if url_action in valid_action:
213 | if url_action == 'high_priority':
214 | job_queue = ztq_core.get_task_queue(queue_name)
215 | job_queue.remove(job_hash_id)
216 | job_queue.push(job_hash_id, to_left=False)
217 | elif url_action == 'low_priority':
218 | job_queue = ztq_core.get_task_queue(queue_name)
219 | job_queue.remove(job_hash_id)
220 | job_queue.push(job_hash_id)
221 | elif url_action == 'delete':
222 | job_queue = ztq_core.get_task_queue(queue_name)
223 | job_queue.remove(job_hash_id)
224 | job_hash = ztq_core.get_task_hash(queue_name)
225 | job_hash.pop(job_hash_id)
226 | return HTTPFound(location = '/taskqueues/'+queue_name)
227 | else:
228 | return Response('Invalid request')
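
# Note on the priority actions above: pushing with to_left=False is assumed to
# put the hash id on the end of the Redis list that workers consume first, so
# 'high_priority' moves the job to the front of the line, while the default
# push sends it to the back.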
229 |
230 | #-------------- Login page --------------------------------
231 | @view_config(route_name='login', renderer='templates/login.pt')
232 | @forbidden_view_config(renderer='templates/login.pt')
233 | def login(request):
234 | login_url = request.route_url('login')
235 | referrer = request.url
236 | if referrer == login_url:
237 | referrer = '/' # never use the login form itself as came_from
238 | came_from = request.params.get('came_from', referrer)
239 | message = ''
240 | login = 'admin'
241 | password = ''
242 | from utils.password import get_password
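    # Check the stored admin password first; if it cannot be read (assumed to
    # happen when no password has been set yet), fall back to the USERS mapping.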
243 | if 'form.submitted' in request.params:
244 | login = request.params['login']
245 | password = request.params['password']
246 | try:
247 | if get_password() == password:
248 | headers = remember(request, login)
249 | return HTTPFound(location = came_from, headers = headers)
250 | except:
251 | if USERS.get(login) == password:
252 | headers = remember(request, login)
253 | return HTTPFound(location = came_from, headers = headers)
254 | message = 'Failed login'
255 |
256 |
257 | return dict(
258 | message = message,
259 | url = request.application_url + '/login',
260 | came_from = came_from,
261 | login = login,
262 | password = password,
263 | )
264 |
265 | @view_config(route_name='logout')
266 | def logout(request):
267 | headers = forget(request)
268 | return HTTPFound(location = '/workerstatus', headers = headers)
269 |
270 | @view_config(route_name='password', renderer='templates/password.pt', permission='edit')
271 | def password(request):
272 | new_password = ''
273 | from utils.password import modify_password
274 | if 'form.submitted' in request.params:
275 | new_password = request.params['new_password']
276 | modify_password(new_password)
277 | return HTTPFound(location= '/logout')
278 |
279 | return dict(
280 | new_password = new_password,
281 | url = request.application_url + '/password',
282 | )
283 |
284 | #-------------- Error handling --------------------------------
285 | @view_config(route_name='errorlog', renderer='errorlog.html', permission='edit')
286 | def error_queue_detail(request):
287 |     """View the details and running state of all error queues.
288 | error_queue = 'ztq:queue:error:' + queue_name
289 | """
290 | page = request.matchdict.get('page', 1)
291 | page = int(page) or 1
292 | return pageination(utils.get_all_error_jobs, page, 'error_jobs')
293 |
294 | @view_config(route_name='errorqueue', renderer='errorlog.html', permission='edit')
295 | def errorqueue(request):
296 |     """View the details and running state of a single error queue.
297 | """
298 | error_queue_id = request.matchdict['id']
299 | page = request.matchdict.get('page', 1)
300 | page = int(page) or 1
301 | return pageination(utils.get_error_queue, page,
302 | 'error_jobs', error_queue_id)
303 |
304 | def error_jobs_handler(request):
305 |     """Remove a failed conversion from the error queue, or redo it.
306 |     Request form: http://server/errorqueues/q01/job?action=remove|redo&hash_id={{hashid}}
307 | """
308 | valid_action = ('remove','redo')
309 | queue_id = request.matchdict['id']
310 | url_action = request.params.get('action','')
311 | hash_id = urllib.unquote(request.params.get('hash_id').encode('utf8'))
312 | if url_action in valid_action:
313 | if url_action == 'remove':
314 | ztq_core.pop_error(queue_id, hash_id)
315 | elif url_action == 'redo':
316 | task = ztq_core.pop_error(queue_id, hash_id)
317 | task['runtime'] = {'queue':queue_id, 'create':int(time.time())}
318 | ztq_core.push_runtime_task(queue_id, task)
319 | return HTTPFound(location = '/errorlog')
320 | else: return Response('Invalid request')
321 |
322 | @view_config(route_name='redo_all_error_for_queue', permission='edit')
323 | def redo_all_error_for_queue(request):
324 |     """Redo every task in this error queue.
325 | """
326 | queue_id = request.matchdict['id']
327 |
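    # pop_error() with timeout=-1 is assumed to be non-blocking, so the loop
    # below drains the error queue and stops once it returns None.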
328 | while 1:
329 | error_task = ztq_core.pop_error(queue_id, timeout=-1)
330 | if error_task is None:
331 | break
332 | error_task['runtime'] = {'queue':queue_id, 'create':int(time.time())}
333 | ztq_core.push_runtime_task(queue_id, error_task)
334 |
335 | return HTTPFound(location = '/taskqueues')
336 |
337 | @view_config(route_name='del_all_error_for_queue', permission='edit')
338 | def del_all_error_for_queue(request):
339 |     """Delete every task in this error queue.
340 | """
341 | queue_id = request.matchdict['id']
342 |
343 | error_hash = ztq_core.get_error_hash(queue_id)
344 | error_queue = ztq_core.get_error_queue(queue_id)
345 |
346 | client = ztq_core.get_redis()
347 | client.delete(error_queue.name)
348 | client.delete(error_hash.name)
349 |
350 | return HTTPFound(location = '/taskqueues')
351 |
352 | #-------------------------------------------------------------
353 | @subscriber(IBeforeRender)
354 | def add_globals(event):
355 |     '''Add *gen_url*, *context_url* and *static_url* functions to the
356 |     template renderer global namespace for easy generation of URLs within
357 | templates.
358 | '''
359 | request = event['request']
360 | def context_url(s, context=None,request=request):
361 | if context is None:
362 | context = request.context
363 | url = resource_url(context,request)
364 | if not url.endswith('/'):
365 | url += '/'
366 | return url + s
367 | def gen_url(route_name=None, request=request, **kw):
368 | if not route_name:
369 | local_request = get_current_request()
370 | route_name = local_request.matched_route.name
371 | url = route_url(route_name, request, **kw)
372 | return url
373 | event['gen_url'] = gen_url
374 | event['context_url'] = context_url
375 | event['static_url'] = lambda x: static_url(x, request)
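
# Minimal usage sketch (hypothetical template snippet, not taken from the
# templates themselves): inside a template the helpers registered above can be
# called directly, e.g.
#   ${gen_url(page=2)}                             - URL of the current route, page 2
#   ${static_url('ztq_console:static/style.css')}  - URL of a static asset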
376 |
377 | def pageination(gen_func, page, resource_name, *args):
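    """Return one 20-item page of *resource_name* plus pagination data:
    a 1-based display range (sindex/eindex) and the previous/next page numbers.
    """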
378 | sindex = ( page - 1 ) * 20
379 | eindex = page * 20
380 | fpage = page - 1
381 | npage = page + 1
382 | resource = gen_func(*args, sindex=sindex, eindex=eindex-1)
383 | return {resource_name:resource,
384 | 'sindex':str(sindex + 1),
385 | 'eindex':str(eindex),
386 | 'fpage':fpage,
387 | 'npage':npage,
388 | }
389 |
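# Example of how the log views above use pageination() (a sketch, assuming the
# 20 entries per page coded above): page 2 of the system log would be
#   pageination(utils.get_sys_log, 2, 'sys_log')
# which calls utils.get_sys_log(sindex=20, eindex=39) and returns
#   {'sys_log': <entries>, 'sindex': '21', 'eindex': '40', 'fpage': 1, 'npage': 3}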
390 |
--------------------------------------------------------------------------------