├── .gitignore ├── README.md ├── config.py ├── daemonize.py ├── log.py ├── requirement.txt ├── scheduler.py └── worker.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.log 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # worker_handler 2 | 3 | ## 一个用来管理多进程框架 4 | 5 | ``` 6 | pip install -r requirement.txt 7 | python scheduler.py 8 | ``` 9 | 10 | 调试阶段... ... 11 | -------------------------------------------------------------------------------- /config.py: -------------------------------------------------------------------------------- 1 | pid_file = 'pid.sock' 2 | max_requests = 100000 3 | daemon_flag = True 4 | process_num = 10 5 | log_file = 'debug.log' 6 | -------------------------------------------------------------------------------- /daemonize.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding:utf-8 -*- 3 | import os 4 | import time 5 | 6 | def daemonize(): 7 | pid=os.fork() # fork1 8 | if pid<0: # error 9 | print "fork1 error" 10 | return -1 11 | elif pid>0: # parent. 12 | exit(0) 13 | os.chdir(os.getcwd()) 14 | os.setsid() 15 | pid=os.fork() # fork 2 16 | if pid<0: 17 | print "fork2 error" 18 | return -1 19 | elif pid>0: 20 | exit(0) 21 | os.umask(0) 22 | os.close(0) 23 | os.close(1) 24 | os.close(2) 25 | fd=os.open('/dev/null', 2) 26 | os.dup(fd) 27 | os.dup(fd) 28 | 29 | if __name__ == "__main__": 30 | daemonize() 31 | time.sleep(30) 32 | -------------------------------------------------------------------------------- /log.py: -------------------------------------------------------------------------------- 1 | from config import log_file 2 | import logging 3 | 4 | def get_logger(LOGFILE): 5 | logger = logging.getLogger('worker') 6 | logger.setLevel(logging.INFO) 7 | fh = logging.FileHandler(LOGFILE) 8 | fh.setLevel(logging.DEBUG) 9 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 10 | fh.setFormatter(formatter) 11 | logger.addHandler(fh) 12 | return logger 13 | 14 | logger = get_logger(log_file) 15 | -------------------------------------------------------------------------------- /requirement.txt: -------------------------------------------------------------------------------- 1 | setproctitle 2 | -------------------------------------------------------------------------------- /scheduler.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding:utf-8 -*- 3 | import os 4 | import time 5 | import signal 6 | import logging 7 | from multiprocessing import Process, Value 8 | 9 | from setproctitle import setproctitle 10 | 11 | from config import process_num, daemon_flag, pid_file 12 | from log import logger 13 | from daemonize import daemonize 14 | from worker import kworker_handler, worker_handler 15 | 16 | 17 | jobs = {} 18 | is_running = True 19 | running_status = Value('d', True) 20 | 21 | 22 | #判断进程及lock是否存在 23 | def set_exists_pid(): 24 | continue_status = False 25 | if os.path.exists(pid_file): 26 | with open(pid_file, 'r') as f: 27 | pid = f.read() 28 | if len(pid) == 0: 29 | continue_status = True 30 | else: 31 | pid = int(pid) 32 | if check_status(pid): 33 | return False 34 | else: 35 | continue_status = True 36 | else: 37 | continue_status = True 38 | 39 | if continue_status: 40 | with open(pid_file, 'w') as f: 41 | logger.info('write pid %s'%os.getpid()) 42 | f.write(str(os.getpid())) 43 | return continue_status 44 | 45 | 46 | #接收信号,比如 kill,或者是键盘 ctrl c 47 | def sig_handler(num, stack): 48 | logger.info('receiving signal, exiting...') 49 | global is_running 50 | global running_status 51 | running_status.value = False 52 | is_running = False 53 | 54 | 55 | #添加进程 56 | def sig_add(num, stack): 57 | logger.info('receiving add signal, Add Process...') 58 | #res = fork_process(process_num) 59 | res = fork_process(1) 60 | jobs.update(res) 61 | 62 | 63 | #亲切的干掉一个进程 64 | def sig_reduce(num, stack): 65 | logger.info('receiving signal, Reduce Process...') 66 | for pid, pid_obj in jobs.iteritems(): 67 | jobs[pid]['is_running'] = False 68 | time.sleep(5) 69 | if pid_obj['obj'].is_alive(): 70 | pid_obj['obj'].terminate() 71 | # os.kill(pid, signal.SIGKILL) 72 | logger.info('receiving reduce signal,%s be killed'%pid) 73 | return 74 | 75 | 76 | #调用工作函数的入口 77 | def request_worker(func, process_name): 78 | setproctitle(process_name) #设置进程的名字 79 | # global running_status 80 | logger.info("child pid %s"%os.getpid()) 81 | # counter = 0 82 | while running_status.value: 83 | s = func() 84 | if s: #如果有返回值,那么判断该任务只想运行一次 85 | break 86 | 87 | 88 | #fork进程 89 | def fork_process(x): 90 | jobs = {} 91 | for i in xrange(x): 92 | detail = {} 93 | p = Process(target=request_worker, args=(worker_handler, "Monitor :worker")) 94 | p.start() 95 | detail['obj'] = p 96 | detail['is_running'] = True 97 | jobs[p.pid] = detail 98 | return jobs 99 | 100 | 101 | #探测一个进程的状态 102 | def check_status(pid): 103 | try: 104 | os.kill(pid, 0) 105 | return True 106 | except: 107 | return False 108 | 109 | 110 | #管理进程总控 111 | def spawn_worker(): 112 | # parent_id = os.getpid() 113 | p = Process(target=request_worker, args=(kworker_handler, "Monitor :kworker")) 114 | p.start() 115 | detail = {} 116 | detail['obj'] = p 117 | detail['is_running'] = True 118 | jobs[p.pid] = detail 119 | res = fork_process(process_num) 120 | jobs.update(res) 121 | while is_running: 122 | time.sleep(0.01) 123 | #第一种方法,调用非阻塞waitpid方法收尸 124 | if len(jobs) < process_num: 125 | res = fork_process(process_num-len(jobs)) 126 | jobs.update(res) 127 | for pid in jobs.keys(): 128 | try: 129 | if not check_status(pid): 130 | del jobs[pid] 131 | os.waitpid(pid, os.WNOHANG) 132 | except: 133 | pass 134 | else: 135 | _c = 0 136 | # interval = 0.1 137 | while 1: 138 | logger.info(str(_c)) 139 | logger.info(str(jobs)) 140 | if _c >= 30 or len(jobs) == 0: 141 | break 142 | for pid in jobs.keys(): 143 | if not check_status(pid): 144 | jobs.pop(pid) 145 | _c += 1 146 | time.sleep(0.1) 147 | for pid in jobs: 148 | try: 149 | os.kill(pid, signal.SIGKILL) 150 | except: 151 | pass 152 | os.remove(pid_file) 153 | 154 | 155 | if __name__ == '__main__': 156 | if daemon_flag: 157 | daemonize() 158 | 159 | if not set_exists_pid(): 160 | logger.error("service is alive") 161 | exit(0) 162 | 163 | setproctitle("Monitor :Master") 164 | signal.signal(signal.SIGINT, sig_handler) 165 | signal.signal(signal.SIGTERM, sig_handler) 166 | signal.signal(signal.SIGTTIN, sig_add) 167 | signal.signal(signal.SIGTTOU, sig_reduce) 168 | #第二种方法,直接忽视子进程退出前发出的sigchld信号,交给内核,让内核来收拾,其实也是让内核用waitpid来解决。 169 | signal.signal(signal.SIGCHLD, signal.SIG_IGN) 170 | logger.info('main process: %d start', os.getpid()) 171 | spawn_worker() 172 | logger.info('main: %d kill all jobs done', os.getpid()) 173 | -------------------------------------------------------------------------------- /worker.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding:utf-8 -*- 3 | import time 4 | 5 | from log import logger 6 | 7 | 8 | #你的业务逻辑 9 | def kworker_handler(): 10 | time.sleep(5) 11 | return True 12 | 13 | 14 | def worker_handler(): 15 | time.sleep(0.01) 16 | logger.info('this is worker_handler') 17 | 18 | 19 | ALLOW_METHOD = [{"func": kworker_handler, "counte": 1}, {"func": worker_handler, "count": 3}] 20 | --------------------------------------------------------------------------------