├── .gitignore ├── Example ├── Soft_WingIDE │ ├── CalcActivationCode.py │ └── README.md ├── create_file │ ├── config.ini │ └── test_create_file_fixedsize.py ├── learn_DirtConfig_footprint │ ├── README.md │ ├── build.py │ ├── config.py │ └── footprint.html ├── log_visual │ ├── 1_log.py │ ├── 2_log.py │ ├── README.md │ ├── doc │ │ ├── date.md │ │ ├── ip.md │ │ └── scripts │ │ │ └── month.sh │ ├── log_if_sort.py │ └── log_ip_AccessSort.py ├── python_base │ ├── README.md │ ├── exception │ │ ├── README.md │ │ └── exception.py │ ├── filemd5 │ │ ├── README.md │ │ └── filemd5.py │ ├── is │ │ └── README.md │ ├── re │ │ ├── README.md │ │ ├── reg.py │ │ └── zabbix_re.py │ └── urllib2 │ │ ├── README.md │ │ ├── urllib2_e1.py │ │ └── urllib2_e2.py ├── python_count │ ├── README.md │ └── py_count.py ├── python_interactive │ ├── README.md │ └── pythonstartup.sh ├── random_test │ ├── README.md │ ├── doc │ │ ├── test.txt │ │ └── user.txt │ └── random_test.py └── store_test │ ├── config.ini │ ├── test_check.py │ └── test_create_file.py ├── LICENSE ├── My_lib ├── Example │ ├── Agent │ │ ├── __init__.py │ │ ├── agent.py │ │ ├── agent.server │ │ ├── daemon.py │ │ ├── monitems.py │ │ ├── monitor.py │ │ ├── netlib │ │ │ ├── NetBase.py │ │ │ ├── NetUtils.py │ │ │ ├── __init__.py │ │ │ └── daemon.py │ │ └── run_daemon.py │ ├── README.md │ └── saver │ │ ├── SaverDB.py │ │ └── netlib │ │ ├── .gitignore │ │ ├── NetBase.py │ │ ├── NetUtils.py │ │ ├── __init__.py │ │ └── daemon.py ├── color │ └── color.py ├── command_utils │ ├── README.md │ ├── command.py │ ├── command_class.py │ └── command_simple.py ├── daemon │ ├── README.md │ ├── ceshi.py │ ├── run_daemon.py │ └── xlib │ │ ├── __init__.py │ │ └── daemon.py ├── easyrun │ ├── README.md │ └── easyrun.py ├── file_utils │ ├── README.md │ ├── config │ └── file_util.py ├── log_utils │ ├── README.md │ └── blog.py ├── monitor_process │ └── monitor.py ├── mpms │ ├── demo.py │ ├── mylib │ │ ├── BLog.py │ │ ├── __init__.py │ │ └── mpms.py │ └── readme.md 
├── netlib │ ├── .gitignore │ ├── NetBase.py │ ├── NetUtils.py │ ├── README.md │ └── __init__.py ├── peewee │ ├── README.md │ └── user.py ├── progressbar │ ├── Progressbar.py │ └── Progressbar.py_bak ├── pymysql │ └── README.md ├── pytest │ └── README.md ├── query_ip │ ├── QQWry.Dat │ ├── README.md │ └── query_ip.py ├── redis-rdb-tools │ └── README.md ├── schema │ ├── README.md │ ├── demo.py │ ├── schema │ │ ├── __init__.py │ │ └── contextlib2.py │ └── test_schema.py ├── serverinfo_config │ ├── README.md │ ├── sc.py │ └── si.py ├── ttable │ ├── README.md │ └── ttable.py ├── validator │ └── README.md ├── w_mpms │ ├── demo.py │ ├── readme.md │ └── w_lib │ │ ├── BLog.py │ │ ├── __init__.py │ │ └── mpms.py └── xmltodict │ ├── README.md │ └── xmltodict.py └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | env/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | downloads/ 16 | eggs/ 17 | .eggs/ 18 | lib/ 19 | lib64/ 20 | parts/ 21 | sdist/ 22 | var/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | 27 | # PyInstaller 28 | # Usually these files are written by a python script from a template 29 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 
30 | *.manifest 31 | *.spec 32 | 33 | # Installer logs 34 | pip-log.txt 35 | pip-delete-this-directory.txt 36 | 37 | # Unit test / coverage reports 38 | htmlcov/ 39 | .tox/ 40 | .coverage 41 | .coverage.* 42 | .cache 43 | nosetests.xml 44 | coverage.xml 45 | *,cover 46 | .hypothesis/ 47 | 48 | # Translations 49 | *.mo 50 | *.pot 51 | 52 | # Django stuff: 53 | *.log 54 | 55 | # Sphinx documentation 56 | docs/_build/ 57 | 58 | # PyBuilder 59 | target/ 60 | 61 | #Ipython Notebook 62 | .ipynb_checkpoints 63 | -------------------------------------------------------------------------------- /Example/Soft_WingIDE/CalcActivationCode.py: -------------------------------------------------------------------------------- 1 | import sha 2 | import string 3 | BASE2 = '01' 4 | BASE10 = '0123456789' 5 | BASE16 = '0123456789ABCDEF' 6 | BASE30 = '123456789ABCDEFGHJKLMNPQRTVWXY' 7 | BASE36 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' 8 | BASE62 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz' 9 | BASEMAX = string.printable 10 | def BaseConvert(number, fromdigits, todigits, ignore_negative = True): 11 | """ converts a "number" between two bases of arbitrary digits 12 | 13 | The input number is assumed to be a string of digits from the 14 | fromdigits string (which is in order of smallest to largest 15 | digit). The return value is a string of elements from todigits 16 | (ordered in the same way). The input and output bases are 17 | determined from the lengths of the digit strings. Negative 18 | signs are passed through. 19 | 20 | decimal to binary 21 | >>> baseconvert(555,BASE10,BASE2) 22 | '1000101011' 23 | 24 | binary to decimal 25 | >>> baseconvert('1000101011',BASE2,BASE10) 26 | '555' 27 | 28 | integer interpreted as binary and converted to decimal (!) 
29 | >>> baseconvert(1000101011,BASE2,BASE10) 30 | '555' 31 | 32 | base10 to base4 33 | >>> baseconvert(99,BASE10,"0123") 34 | '1203' 35 | 36 | base4 to base5 (with alphabetic digits) 37 | >>> baseconvert(1203,"0123","abcde") 38 | 'dee' 39 | 40 | base5, alpha digits back to base 10 41 | >>> baseconvert('dee',"abcde",BASE10) 42 | '99' 43 | 44 | decimal to a base that uses A-Z0-9a-z for its digits 45 | >>> baseconvert(257938572394L,BASE10,BASE62) 46 | 'E78Lxik' 47 | 48 | ..convert back 49 | >>> baseconvert('E78Lxik',BASE62,BASE10) 50 | '257938572394' 51 | 52 | binary to a base with words for digits (the function cannot convert this back) 53 | >>> baseconvert('1101',BASE2,('Zero','One')) 54 | 'OneOneZeroOne' 55 | 56 | """ 57 | if not ignore_negative and str(number)[0] == '-': 58 | number = str(number)[1:] 59 | neg = 1 60 | else: 61 | neg = 0 62 | x = long(0) 63 | for digit in str(number): 64 | x = x * len(fromdigits) + fromdigits.index(digit) 65 | 66 | res = '' 67 | while x > 0: 68 | digit = x % len(todigits) 69 | res = todigits[digit] + res 70 | x /= len(todigits) 71 | 72 | if neg: 73 | res = '-' + res 74 | return res 75 | 76 | def SHAToBase30(digest): 77 | """Convert from a hexdigest form SHA hash into a more compact and 78 | ergonomic BASE30 representation. 
This results in a 17 'digit' 79 | number.""" 80 | tdigest = ''.join([ c for i, c in enumerate(digest) if i / 2 * 2 == i ]) 81 | result = BaseConvert(tdigest, BASE16, BASE30) 82 | while len(result) < 17: 83 | result = '1' + result 84 | 85 | return result 86 | def AddHyphens(code): 87 | """Insert hyphens into given license id or activation request to 88 | make it easier to read""" 89 | return code[:5] + '-' + code[5:10] + '-' + code[10:15] + '-' + code[15:] 90 | 91 | LicenseID='CN123-12345-12345-12345' 92 | #Copy the Request Code from the dialog 93 | RequestCode='RL539-XMGG2-A3DBA-LKA8J' 94 | hasher = sha.new() 95 | hasher.update(RequestCode) 96 | hasher.update(LicenseID) 97 | digest = hasher.hexdigest().upper() 98 | lichash = RequestCode[:3] + SHAToBase30(digest) 99 | lichash=AddHyphens(lichash) 100 | 101 | #Calculate the Activation Code 102 | data=[7,123,23,87] 103 | tmp=0 104 | realcode='' 105 | for i in data: 106 | for j in lichash: 107 | tmp=(tmp*i+ord(j))&0xFFFFF 108 | realcode+=format(tmp,'=05X') 109 | tmp=0 110 | 111 | act30=BaseConvert(realcode,BASE16,BASE30) 112 | while len(act30) < 17: 113 | act30 = '1' + act30 114 | act30='AXX'+act30 115 | act30=AddHyphens(act30) 116 | print "The Activation Code is: "+act30 117 | -------------------------------------------------------------------------------- /Example/Soft_WingIDE/README.md: -------------------------------------------------------------------------------- 1 | # wingIDE 2 | ## the license id 3 | CN123-12345-12345-12345 4 | ## the request code 5 | we use the request code create the Activation code 6 | 7 | 8 | -------------------------------------------------------------------------------- /Example/create_file/config.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | # Sub-directory numbers 3 | DIR_PATH_NUM = 1 4 | # File numbers in each sub-directory 5 | FILE_NUM = 3 6 | # Path to write file 7 | PATH_STORE = /tmp/test/ 8 | # Path to record logs 9 | 
PATH_LOG_RECORD = /tmp/record/ 10 | # file size(kb) 11 | FILE_SIZE_KB = 1024 12 | -------------------------------------------------------------------------------- /Example/create_file/test_create_file_fixedsize.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding=utf8 3 | """ 4 | # Author: meetbill 5 | # Created Time : 2017-03-07 22:59:17 6 | 7 | # File Name: test_create_file_fixedsize.py 8 | # Description: 9 | 10 | """ 11 | import os 12 | import random 13 | import hashlib 14 | import threading 15 | import ConfigParser 16 | 17 | config = ConfigParser.ConfigParser() 18 | config.read('./config.ini') 19 | 20 | DIR_PATH_NUM= int(config.get('default', 'DIR_PATH_NUM')) 21 | FILE_NUM = int(config.get('default','FILE_NUM')) 22 | PATH_STORE = config.get('default','PATH_STORE') 23 | PATH_LOG_RECORD = config.get('default','PATH_LOG_RECORD') 24 | FILE_SIZE_KB = int(config.get('default','FILE_SIZE_KB')) 25 | 26 | def log(path, data, suffix="log"): 27 | fout = open(path + "md5log." 
+ suffix, "a") 28 | fout.write(data) 29 | fout.write("\n") 30 | 31 | def get_md5(filename): 32 | if not os.path.isfile(filename): 33 | return 34 | myhash = hashlib.md5() 35 | f = file(filename, 'rb') 36 | while True: 37 | b = f.read(8096) 38 | if not b : 39 | break 40 | myhash.update(b) 41 | f.close() 42 | return myhash.hexdigest() 43 | 44 | 45 | def write_file(path_store, file_index, dir_index): 46 | 47 | ceshi_path = path_store + "/" + dir_index + "/" 48 | if not os.path.exists(ceshi_path): 49 | os.makedirs(os.path.dirname(ceshi_path)) 50 | 51 | ceshi_full_path = ceshi_path + file_index 52 | 53 | 54 | data=(str(random.randint(0,1024)).zfill(16)+ str(random.randint(0,1024)).zfill(16))* 32 * FILE_SIZE_KB 55 | #data = (str(8).zfill(8)) * random.randint(0, 128 * 1024 * MAX_FIlE_SIZE_MB) 56 | #data = (str(8).zfill(8)) * random.randint(0, 128 * 1024 * MAX_FIlE_SIZE_MB) 57 | 58 | fout = open(ceshi_full_path, "w") 59 | fout.write(data) 60 | fout.flush() 61 | md5_ceshifile = get_md5(ceshi_full_path) 62 | os.rename(os.path.join(ceshi_path, file_index), os.path.join(ceshi_path,md5_ceshifile)) 63 | 64 | log_data = md5_ceshifile 65 | log(PATH_LOG_RECORD, log_data, dir_index) 66 | print dir_index, file_index, log_data 67 | 68 | 69 | def write_files(path_store, file_num, dir_index): 70 | 71 | for i in xrange(0, file_num): 72 | write_file(path_store, str(i), dir_index) 73 | 74 | def writ_files_mutilthread(path_store, file_num): 75 | 76 | if not os.path.exists(PATH_LOG_RECORD): 77 | os.makedirs(os.path.dirname(PATH_LOG_RECORD)) 78 | 79 | threads = [] 80 | 81 | for i in xrange(0, DIR_PATH_NUM): 82 | dir_index = str(i); 83 | t = threading.Thread(target=write_files, args=(path_store, file_num, dir_index)) 84 | t.start() 85 | threads.append(t) 86 | 87 | for t in threads: 88 | t.join() 89 | 90 | if __name__ == '__main__': 91 | writ_files_mutilthread(PATH_STORE, FILE_NUM) 92 | -------------------------------------------------------------------------------- 
/Example/learn_DirtConfig_footprint/README.md: -------------------------------------------------------------------------------- 1 | # 记录去过的足迹 2 | 3 | 4 | ### python版本 5 | 6 | * 下载本项目,进入到python目录下,有两个文件,config是你需要修改的. 7 | * 项目依赖于requests模块 需要pip安装 8 | 9 | ``` 10 | config={ 11 | 'title':'去过的地方', 12 | 'subtitle':'北京 昆明 西北 呼和浩特', 13 | 'foot':[ 14 | '北京 昆明 丽江 香格里拉 丽江 昆明 北京', 15 | ] 16 | } 17 | 18 | ``` 19 | 20 | * 修改上面这个配置里的title(标题),subtitle(副标题)和foot(行程) 21 | * foot是一个数组,每个元素是一个行程,目的地(景点)之间用空格分开 22 | * 执行 python build.py ,会生成一个footprint.html,大功告成,浏览器打开看效果吧 23 | * 谢谢大家,开发时间很短,如有bug 请轻喷 24 | 25 | 26 | ### 彩色版本和定制区域 27 | 28 | * config加一个color变量,就会把足迹线变成彩色,如下 29 | 30 | ``` 31 | config={ 32 | 'title':'去过的地方', 33 | 'subtitle':'北京 昆明 西北 呼和浩特', 34 | 'color':True, 35 | 'foot':[ 36 | '北京 昆明 丽江 香格里拉 丽江 昆明 北京', 37 | '霍营地铁站 布达拉宫', 38 | '北京 北戴河 北京', 39 | '北京 兰州 敦煌 张掖 祁连 西宁 青海湖 茶卡盐湖 西宁 银川 呼和浩特 北京' 40 | ] 41 | } 42 | ``` 43 | * 如果你只在北京内部玩,或者定制一个北京旅游计划,可以加一个region字段,如下 44 | 45 | ``` 46 | config={ 47 | 'title':'北京去过的地方', 48 | 'subtitle':'走啊走', 49 | 'color':True, 50 | 'region':'北京', 51 | 'foot':[ 52 | '北京交通大学 霍营地铁站 古北水镇', 53 | '北京交通大学 八达岭 北京交通大学', 54 | '北京交通大学 妙峰山 潭柘寺' 55 | 56 | ] 57 | } 58 | ``` 59 | -------------------------------------------------------------------------------- /Example/learn_DirtConfig_footprint/build.py: -------------------------------------------------------------------------------- 1 | #encoding:utf-8 2 | import requests 3 | import os,sys 4 | import config 5 | reload(sys) 6 | sys.setdefaultencoding("utf-8") 7 | import requests 8 | import json 9 | import random 10 | #{{{html 11 | htmlStr = ''' 12 | 13 | 14 | 15 | 16 | %(title)s 17 | 18 | 19 | 20 |
21 | 22 | 124 | 125 | 126 | 127 | '''.replace('\n','') 128 | # .replace('<','<').replace('>','>') 129 | 130 | #}}} 131 | 132 | class FootPrint(): 133 | # get config 134 | def __init__(self,config): 135 | self.key = 'q5mTrTGzCSVq5QmGpI9y18Bo' 136 | self.url = 'http://api.map.baidu.com/geocoder/v2/?output=json&ak=%s&address=' % (self.key) 137 | # dirt.get('key',default=None) 138 | self.title = config.get('title','2015年行程') 139 | print self.title 140 | self.color = config.get('color',None) 141 | self.subtitle = config.get('subtitle') 142 | print self.subtitle 143 | self.data = config['foot'] 144 | self.region = config.get('region','china') 145 | self.alldata = {} 146 | self.linedata = [] 147 | self.pointdata = [] 148 | self.cache = {} 149 | def processData(self): 150 | # self.data is a list 151 | for route in self.data: 152 | # route is a spring 153 | print route 154 | # spring.split --> list tmp 155 | tmp = route.split() 156 | for t in tmp: 157 | # print t 158 | # self.cache is a dict 159 | self.cache[t] = self.getValue() 160 | if t not in self.alldata: 161 | self.getPoint(t) 162 | 163 | for i in range(len(tmp)-1): 164 | val = self.getValue() 165 | self.linedata.append([{'name':tmp[i]},{"name":tmp[i+1],'value':self.cache[tmp[i+1]]}]) 166 | for name in self.alldata: 167 | self.pointdata.append({'name':name,'value':self.cache[name]}) 168 | 169 | def getPoint(self,name): 170 | url = self.url+name 171 | try: 172 | r = requests.get(url) 173 | res = r.json() 174 | if res.get('result',None): 175 | loc = res['result']['location'] 176 | self.alldata[name] = [loc['lng'],loc['lat']] 177 | except: 178 | print '获取不到%s的经纬度信息'%(name) 179 | def writeFile(self): 180 | obj = { 181 | 'title':self.title, 182 | 'subtitle':self.subtitle, 183 | 'region':self.region, 184 | 'alldata':json.dumps(self.alldata), 185 | 'linedata':json.dumps(self.linedata), 186 | 'pointdata':json.dumps(self.pointdata) 187 | } 188 | 189 | with open('footprint.html','w') as f: 190 | f.write(htmlStr % obj) 191 | 
print '成功生成文件,打开看看吧!' 192 | def getValue(self): 193 | if self.color: 194 | return random.randint(0,100) 195 | else: 196 | return 1 197 | def start(self): 198 | self.processData() 199 | self.writeFile() 200 | 201 | if __name__ =="__main__": 202 | F = FootPrint(config.config) 203 | F.start() 204 | -------------------------------------------------------------------------------- /Example/learn_DirtConfig_footprint/config.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | #dict 4 | config={ 5 | 'title':'行程', 6 | 'subtitle':'北京 山西 山东 天津 湖北 海南 内蒙古 陕西 福建 广东 河北', 7 | 'color':'true', 8 | 'foot':[ 9 | '太原 北京 太原', 10 | '北京 呼和浩特 北京', 11 | '长治 新乡 临沂 新乡 长治', 12 | '北京 陕西 北京', 13 | '临沂 青岛 济南 天津 北京', 14 | '北京 武汉 孝感 武汉 北京', 15 | '北京 武汉 咸宁 北京', 16 | '太原 海口 太原', 17 | '北京 厦门 深圳 佛山 广州 北京', 18 | '北京 秦皇岛 北京' 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /Example/learn_DirtConfig_footprint/footprint.html: -------------------------------------------------------------------------------- 1 | 行程
-------------------------------------------------------------------------------- /Example/log_visual/1_log.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/meetbill/MyPythonLib/fd58b8a112ad366fba67771cafd752f0b8f378ee/Example/log_visual/1_log.py -------------------------------------------------------------------------------- /Example/log_visual/2_log.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/meetbill/MyPythonLib/fd58b8a112ad366fba67771cafd752f0b8f378ee/Example/log_visual/2_log.py -------------------------------------------------------------------------------- /Example/log_visual/README.md: -------------------------------------------------------------------------------- 1 | # Learn log visual 2 | 3 | ## (1) 1_log.py 4 | Using the command analysis log 5 | ## (2) 2_log.py 6 | Using the command analysis log,onle show the informance that return_info is not 'OK'. 
7 | 8 | ## 日志查询知识储备 9 | 10 | [date](doc/date.md) 11 | 12 | [IP合法性校验](doc/ip.md) 13 | -------------------------------------------------------------------------------- /Example/log_visual/doc/date.md: -------------------------------------------------------------------------------- 1 | # date 2 | 3 | ## 批量输出一段日期内的每一天 4 | 5 | ``` 6 | #!/bin/bash 7 | datebeg=$1 8 | dateend=$2 9 | #read datebeg 10 | #read dateend 11 | beg_s=`date -d "$datebeg" +%s` 12 | end_s=`date -d "$dateend" +%s` 13 | while [ "$beg_s" -lt "$end_s" ] 14 | do 15 | DATE_ONE=`date -d @$beg_s +"%Y-%m-%d"` 16 | echo ${DATE_ONE} 17 | beg_s=$((beg_s+86400)) 18 | done 19 | ``` 20 | 下载 21 | 22 | ``` 23 | #curl -o month.sh "https://raw.githubusercontent.com/BillWang139967/python_learn/master/log_visual/doc/scripts/month.sh" 24 | #sh month.sh 20160701 20160801 25 | ``` 26 | -------------------------------------------------------------------------------- /Example/log_visual/doc/ip.md: -------------------------------------------------------------------------------- 1 | # IP地址合法性 2 | 3 | 正确的IP地址格式 4 | 5 | > * 以点号为分隔的域为4或6(IPV6); 6 | > * 除点号外全部为数字; 7 | > * 第一个和最后一个字节不能为0; 8 | > * 所有字节的数值不能超过255; 9 | > * 最后一个字节不能为255;(这是子网广播地址) 10 | 11 | 12 | ## grep 方式 13 | 14 | ``` 15 | a=192.168.1.1 16 | echo $a |grep "^[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}$"|grep -v "\.0[0-9]\{1,2\}"|grep -v "^0[0-9]\{1,2\}" || return 1 17 | ``` 18 | 19 | ## awk 方式 20 | 21 | ``` 22 | a=192.168.1.1 23 | echo $a |awk -F. 
'{if(NF==4 && $1>0 && $1<=255 && $2>=0 && $2<=255 && $3>=0 && $3<=255 && $4>0 && $4<255) exit 0; else exit 1}' 24 | ``` 25 | -------------------------------------------------------------------------------- /Example/log_visual/doc/scripts/month.sh: -------------------------------------------------------------------------------- 1 | ######################################################################### 2 | # File Name: month.sh 3 | # Author: Bill 4 | # mail: XXXXXXX@qq.com 5 | # Created Time: 2016-08-26 12:13:52 6 | ######################################################################### 7 | #!/bin/bash 8 | datebeg=$1 9 | dateend=$2 10 | #read datebeg 11 | #read dateend 12 | beg_s=`date -d "$datebeg" +%s` 13 | end_s=`date -d "$dateend" +%s` 14 | while [ "$beg_s" -lt "$end_s" ] 15 | do 16 | DATE_ONE=`date -d @$beg_s +"%Y-%m-%d"` 17 | echo ${DATE_ONE} 18 | beg_s=$((beg_s+86400)) 19 | done 20 | -------------------------------------------------------------------------------- /Example/log_visual/log_if_sort.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding=utf8 3 | """ 4 | # Author: Bill 5 | # Created Time : 2016-06-02 23:26:54 6 | 7 | # File Name: log_if_sort.py 8 | # Description:针对某个请求根据访问次数将每个IP排序 9 | 10 | """ 11 | import re 12 | f = open('./acc.log') 13 | res = {} 14 | sum=0 15 | for l in f: 16 | try: 17 | arr = re.split(' |\t',l) 18 | # 获取ip url 和status 19 | module = arr[3] 20 | ip = arr[4] 21 | status = arr[7] 22 | interface = arr[8] 23 | 24 | #return_info = arr[9] 25 | if interface != 'getUserQuota': 26 | continue 27 | # ip url 和status当key,每次统计+1 28 | res[(ip)] = res.get((ip),0)+1 29 | except: 30 | print l 31 | # 生成一个临时的list 32 | res_list = [(k,v) for k,v in res.items()] 33 | # 按照统计数量排序 34 | for k in sorted(res_list,key=lambda x:x[1],reverse=True): 35 | sum=sum+1 36 | print k 37 | 38 | print sum 39 | -------------------------------------------------------------------------------- 
/Example/log_visual/log_ip_AccessSort.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding=utf8 3 | """ 4 | # Author: Bill 5 | # Created Time : 2016-06-02 23:17:12 6 | 7 | # File Name: log_ip_AccessSort.py 8 | # Description:根据访问请求将每个IP排序 9 | 10 | """ 11 | import re 12 | f = open('./acc.log') 13 | res = {} 14 | sum=0 15 | for l in f: 16 | try: 17 | arr = re.split(' |\t',l) 18 | # 获取ip url 和status 19 | module = arr[3] 20 | ip = arr[4] 21 | status = arr[7] 22 | interface = arr[8] 23 | 24 | #return_info = arr[9] 25 | # ip url 和status当key,每次统计+1 26 | res[(ip)] = res.get((ip),0)+1 27 | except: 28 | print l 29 | # 生成一个临时的list 30 | res_list = [(k,v) for k,v in res.items()] 31 | # 按照统计数量排序 32 | for k in sorted(res_list,key=lambda x:x[1],reverse=True): 33 | sum=sum+1 34 | print k 35 | 36 | print sum 37 | -------------------------------------------------------------------------------- /Example/python_base/README.md: -------------------------------------------------------------------------------- 1 | 2 | * 异常处理 [exception](./exception/) 3 | * [md5](./filemd5/) 4 | * http 请求 [urllib2](./urllib2) 5 | * 正则表达式 [re](./re/README.md) 6 | * Python 中 [is 和 == 的区别](./is/README.md) 7 | -------------------------------------------------------------------------------- /Example/python_base/exception/README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | * [exception](#exception) 4 | * [str(e)](#stre) 5 | * [repr(e)](#repre) 6 | * [e.message](#emessage) 7 | * [采用 traceback 模块](#采用-traceback-模块) 8 | * [推荐](#推荐) 9 | 10 | 11 | # exception 12 | 13 | ## str(e) 14 | 15 | 返回字符串类型,只给出异常信息,不包括异常信息的类型,如 1/0 的异常信息 16 | 17 | 'integer division or modulo by zero' 18 | 19 | ## repr(e) 20 | 21 | 给出较全的异常信息,包括异常信息的类型,如 1/0 的异常信息 22 | 23 | "ZeroDivisionError('integer division or modulo by zero',)" 24 | 25 | ## e.message 26 | 27 | 获得的信息同 str(e) 28 | 29 | ## 采用 traceback 模块 30 | 31 | 需要导入 traceback 
模块,此时获取的信息最全,与 python 命令行运行程序出现错误信息一致。使用 traceback.print_exc() 打印异常信息到标准错误,就像没有获取一样,或者使用 traceback.format_exc() 将同样的输出获取为字符串。你可以向这些函数传递各种各样的参数来限制输出,或者重新打印到像文件类型的对象。 32 | 33 | # 推荐 34 | 35 | 推荐使用 traceback 模块 36 | -------------------------------------------------------------------------------- /Example/python_base/exception/exception.py: -------------------------------------------------------------------------------- 1 | import traceback 2 | 3 | print '########################################################' 4 | print "1/0 Exception Info" 5 | print '---------------------------------------------------------' 6 | try: 7 | 1/0 8 | except Exception, e: 9 | print '_____________________________________________________' 10 | print 'str(Exception):\t', str(Exception) 11 | print 12 | print '_____________________________________________________' 13 | print 'str(e):\t\t', str(e) 14 | print 15 | print '_____________________________________________________' 16 | print 'repr(e):\t', repr(e) 17 | print 18 | print '_____________________________________________________' 19 | print 'e.message:\t', e.message 20 | print 21 | print '_____________________________________________________' 22 | print 'traceback.print_exc():'; traceback.print_exc() 23 | print 24 | print '_____________________________________________________' 25 | print 'traceback.format_exc():\n%s' % traceback.format_exc() 26 | print '########################################################' 27 | -------------------------------------------------------------------------------- /Example/python_base/filemd5/README.md: -------------------------------------------------------------------------------- 1 | # filemd5 2 | 3 | ## (1) python filemd5 filename 4 | Useing the hashlib 5 | -------------------------------------------------------------------------------- /Example/python_base/filemd5/filemd5.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | try: 3 | import 
hashlib,sys 4 | except Exception,e: 5 | print e 6 | sys.exit(1) 7 | 8 | def main(filename): 9 | m = hashlib.md5() 10 | fp=open(filename, 'rb') 11 | while True: 12 | blk = fp.read(4096) # 4KB per block 13 | if not blk: break 14 | m.update(blk) 15 | return m.hexdigest() 16 | 17 | def md5new(filename): 18 | return hashlib.md5(open(filename).read()).hexdigest() 19 | 20 | if __name__ == '__main__': 21 | try: 22 | print main(sys.argv[1]) 23 | print md5new(sys.argv[1]) 24 | except: 25 | sys.exit('Usage: %s file' % sys.argv[0]) 26 | -------------------------------------------------------------------------------- /Example/python_base/is/README.md: -------------------------------------------------------------------------------- 1 | ## Python 中 is 和 == 的区别 2 | 3 | 4 | * [Python 中对象包含的三个基本要素](#python-中对象包含的三个基本要素) 5 | * [is 和 == 区别](#is-和--区别) 6 | * [== 用来比较判断两个对象的 value(值)是否相等](#-用来比较判断两个对象的-value值是否相等) 7 | * [is 用来判断对象的 id 是否相等](#is-用来判断对象的-id-是否相等) 8 | 9 | 10 | 11 | ## Python 中对象包含的三个基本要素 12 | 13 | Python 中有很多运算符,今天我们就来讲讲 is 和 == 两种运算符在应用上的本质区别是什么。 14 | 15 | 在讲 is 和 == 这两种运算符区别之前,首先要知道 Python 中对象包含的三个基本要素,分别是:id(身份标识)、type(数据类型)和 value(值)。 16 | 17 | ## is 和 == 区别 18 | 19 | is 和 == 都是对对象进行比较判断作用的,但对对象比较判断的内容并不相同。下面来看看具体区别在哪。 20 | 21 | == 比较操作符和 is 同一性运算符区别 22 | 23 | ### == 用来比较判断两个对象的 value(值)是否相等 24 | 25 | == 是 python 标准操作符中的比较操作符,用来比较判断两个对象的 value(值)是否相等,例如下面两个字符串间的比较: 26 | 27 | 例 1. 28 | 29 | ``` 30 | >>> a = 'cheesezh' 31 | >>> b = 'cheesezh' 32 | >>> a == b 33 | True 34 | ``` 35 | 36 | ### is 用来判断对象的 id 是否相等 37 | 38 | is 也被叫做同一性运算符,这个运算符比较判断的是对象间的唯一身份标识,也就是 id 是否相同。通过对下面几个 list 间的比较,你就会明白 is 同一性运算符的工作原理: 39 | 40 | 例 2. 
41 | 42 | ``` 43 | >>> x = y = [4,5,6] 44 | >>> z = [4,5,6] 45 | >>> x == y 46 | True 47 | >>> x == z 48 | True 49 | >>> x is y 50 | True 51 | >>> x is z 52 | False 53 | >>> 54 | >>> print id(x) 55 | 3075326572 56 | >>> print id(y) 57 | 3075326572 58 | >>> print id(z) 59 | 3075328140 60 | ``` 61 | 前三个例子都是 True,这什么最后一个是 False 呢?x、y 和 z 的值是相同的,所以前两个是 True 没有问题。至于最后一个为什么是 False,看看三个对象的 id 分别是什么就会明白了。 62 | 63 | 下面再来看一个例子,例 3 中同一类型下的 a 和 b 的(a==b)都是为 True,而(a is b)则不然。 64 | 65 | 例 3. 66 | 67 | ``` 68 | >>> a = 1 #a 和 b 为数值类型 69 | >>> b = 1 70 | >>> a is b 71 | True 72 | >>> id(a) 73 | 14318944 74 | >>> id(b) 75 | 14318944 76 | >>> a = 'cheesezh' #a 和 b 为字符串类型 77 | >>> b = 'cheesezh' 78 | >>> a is b 79 | True 80 | >>> id(a) 81 | 42111872 82 | >>> id(b) 83 | 42111872 84 | >>> a = (1,2,3) #a 和 b 为元组类型 85 | >>> b = (1,2,3) 86 | >>> a is b 87 | False 88 | >>> id(a) 89 | 15001280 90 | >>> id(b) 91 | 14790408 92 | >>> a = [1,2,3] #a 和 b 为 list 类型 93 | >>> b = [1,2,3] 94 | >>> a is b 95 | False 96 | >>> id(a) 97 | 42091624 98 | >>> id(b) 99 | 42082016 100 | >>> a = {'cheese':1,'zh':2} #a 和 b 为 dict 类型 101 | >>> b = {'cheese':1,'zh':2} 102 | >>> a is b 103 | False 104 | >>> id(a) 105 | 42101616 106 | >>> id(b) 107 | 42098736 108 | >>> a = set([1,2,3])#a 和 b 为 set 类型 109 | >>> b = set([1,2,3]) 110 | >>> a is b 111 | False 112 | >>> id(a) 113 | 14819976 114 | >>> id(b) 115 | 14822256 116 | 117 | ``` 118 | 119 | 通过例 3 可看出,只有数值型和字符串型的情况下,a is b 才为 True,当 a 和 b 是 tuple,list,dict 或 set 型时,a is b 为 False。 120 | -------------------------------------------------------------------------------- /Example/python_base/re/reg.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding=utf8 3 | import re 4 | #---------------------正则-匹配--------------------- 5 | print(re.match(r'^\d{3}-\d{3,8}$', '010-85112365')) 6 | print(re.match(r'^\d{3}\-\d{3,8}$', '0108511265')) 7 | 8 | #---------------------正则-分割--------------------- 9 | print 
(''''a b c'.split(' '):''','a b c'.split(' ')) 10 | print ('''re.split(r'\s+', 'a b c'):''', re.split(r'\s+', 'a b c')) 11 | print (re.split(r'[\s\,]+', 'a,b, c d')) 12 | print (re.split(r'[\s|\,]+', 'a,b, c d')) 13 | print (re.split(r'[\s\,\;]+', 'a,b;; c d')) 14 | 15 | #---------------------正则-分组--------------------- 16 | m = re.match(r'^(\d{3})-(\d{3,8})$', '010-123456') 17 | print (m) 18 | print (m.group(0)) 19 | print (m.group(1)) 20 | print (m.group(2)) 21 | 22 | #---------------------正则-贪婪匹配--------------------- 23 | print (re.match(r'^(\d+)(0*)$', '102300').groups()) 24 | print (re.match(r'^(\d+?)(0*)$', '102300').groups()) 25 | -------------------------------------------------------------------------------- /Example/python_base/re/zabbix_re.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding=utf8 3 | """ 4 | # Author: meetbill 5 | # Created Time : 2017-08-30 20:54:25 6 | 7 | # File Name: zabbix_re.py 8 | # Description: 9 | 10 | """ 11 | import re 12 | # vfs.fs.size[/,pfree] 13 | #subject="vfs.[]" 14 | #regex="vfs.\[\]$" 15 | subject_list=["vfs.fs.size[/,pfree]","vfs.fs.size[/home,pfree]"] 16 | regex="vfs.fs.size\[.*,pfree\]$" 17 | regex1="vfs.fs.size\[.,pfree\]$" 18 | 19 | 20 | print "regex:-------------------------",regex 21 | for subject in subject_list: 22 | if re.match(regex, subject): 23 | print subject,"OK" 24 | else: 25 | print subject,"ERR" 26 | 27 | print "regex:-------------------------",regex1 28 | for subject in subject_list: 29 | if re.match(regex1, subject): 30 | print subject,"OK" 31 | else: 32 | print subject,"ERR" 33 | -------------------------------------------------------------------------------- /Example/python_base/urllib2/README.md: -------------------------------------------------------------------------------- 1 | # 异常处理 2 | 3 | 4 | * [URLError 异常](#urlerror-异常) 5 | * [HTTPError](#httperror) 6 | * [捕获异常](#捕获异常) 7 | 8 | 9 | 10 | 当 urlopen() 不能处理响应时会引起 URLError 
异常。HTTPError 异常是 URLError 的一个子类,只有在访问 HTTP 类型的 URL 时才会引起。 11 | 12 | ## URLError 异常 13 | 14 | 通常引起 URLError 的原因是: 15 | 16 | * 无网络连接(没有到目标服务器的路由) 17 | * 访问的目标服务器不存在 18 | 19 | 在这种情况下,异常对象会有 reason 属性(是一个(错误码、错误原因)的元组) 20 | 21 | ## HTTPError 22 | 23 | 每一个从服务器返回的 HTTP 响应都有一个状态码。其中,有的状态码表示服务器不能完成相应的请求,默认的处理程序可以为我们处理一些这样的状态码(如返回的响应是重定向,urllib2 会自动为我们从重定向后的页面中获取信息)。有些状态码,urllib2 模块不能帮我们处理,那么 urlopen 函数就会引起 HTTPError 异常,其中典型的有 404/401。 24 | HTTPError 异常的实例有整数类型的 code 属性,表示服务器返回的错误状态码。 25 | urllib2 模块默认的处理程序可以处理重定向(状态码是 300 范围),而且状态码在 100-299 范围内表示成功。因此,能够引起 HTTPError 异常的状态码范围是:400-599. 26 | 当引起错误时,服务器会返回 HTTP 错误码和错误页面。你可以将 HTPError 实例作为返回页面,这意味着,HTTPError 实例不仅有 code 属性,还有 read、geturl、info 等方法。 27 | 28 | ## 捕获异常 29 | 30 | HTTPError 必须排在 URLError 的前面,因为 HTTPError 是 URLError 的子类对象,在网访问中引发的所有异常要么是 URLError 类要么是其子类,如果我们将 URLError 排在 HTTPError 的前面,那么将导致 HTTPError 异常将永远不会被触发,因为 Python 在捕获异常时是按照从前往后的顺序挨个匹配的 31 | -------------------------------------------------------------------------------- /Example/python_base/urllib2/urllib2_e1.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding=utf8 3 | """ 4 | # Author: meetbill 5 | # Created Time : 2017-08-31 16:09:18 6 | 7 | # File Name: wwww.py 8 | # Description: 9 | 10 | """ 11 | import urllib2 12 | 13 | url = 'http://xxxx.cm/ping' 14 | response = None 15 | try: 16 | response = urllib2.urlopen(url,timeout=3) 17 | print "xxxxxxxxx" 18 | except urllib2.URLError as e: 19 | if hasattr(e, 'code'): 20 | print 'Error code:',e.code 21 | elif hasattr(e, 'reason'): 22 | print 'Reason:',e.reason 23 | 24 | #print response.code 25 | -------------------------------------------------------------------------------- /Example/python_base/urllib2/urllib2_e2.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding=utf8 3 | """ 4 | # Author: meetbill 5 | # Created Time : 2017-09-03 23:27:32 6 | 7 | # File Name: wwwb.py 8 | # 
Description: 9 | 10 | """ 11 | import urllib2 12 | 13 | url = "http://xxxxx.com/xxx" 14 | try: 15 | response = urllib2.urlopen(url,timeout=3) 16 | # print response.read( ) 17 | response.close( ) 18 | # HTTPError必须排在URLError的前面 19 | # 因为HTTPError是URLError的子类对象 20 | # 在网访问中引发的所有异常要么是URLError类要么是其子类 21 | # 如果我们将URLError排在HTTPError的前面,那么将导致HTTPError异常将永远不会被触发 22 | # 因为Python在捕获异常时是按照从前往后的顺序挨个匹配的 23 | print "xxxxxx" 24 | except urllib2.HTTPError, e: 25 | print "The server couldn't fulfill the request" 26 | print "Error code:", e.code 27 | if e.code == 404: 28 | print "Page not found!" 29 | #do someting 30 | elif e.code == 403: 31 | print "Access denied!" 32 | #do someting 33 | else: 34 | print "Something happened! Error code", e.code 35 | #print "Return content:", e.read() 36 | except urllib2.URLError, e: 37 | print "Failed to reach the server" 38 | print "The reason:", e.reason 39 | -------------------------------------------------------------------------------- /Example/python_count/README.md: -------------------------------------------------------------------------------- 1 | ## 统计某个文件夹中代码行数 2 | 3 | 4 | 5 | * [usage](#usage) 6 | * [思路](#思路) 7 | * [统计 python 文件中的代码,注释,空白对应的行数](#统计-python-文件中的代码注释空白对应的行数) 8 | 9 | 10 | 11 | ## usage 12 | ``` 13 | python ./py_count.py count dir_name 14 | ``` 15 | ## 思路 16 | ### 统计 python 文件中的代码,注释,空白对应的行数 17 | 18 | 其实代码和空白行很好统计,难点是注释行 19 | ``` 20 | python 中的注释分为以#开头的单行注释 21 | 22 | 或者以'''开头以'''结尾 或以"""开头以"""结尾的文档注释,如: 23 | 24 | ''' 25 | 26 | hello world 27 | 28 | '''和 29 | 30 | ''' 31 | 32 | hello world''' 33 | ``` 34 | 35 | 思路是用 is_comment 记录是否存在多行注释,如果不存在,则判断当前行是否以'''开头,是则将 is_comment 设为 True, 否则进行空行、当前行注释以及代码行的判断, 36 | 37 | 如果 is_comment 已经为 True 即,多行注释已经开始,则判断当前行是否以'''结尾,是则将 is_comment 设为 False, 同时增加注释的行数。表示多行注释已经结束,反之继续,此时多行注释还未结束 38 | -------------------------------------------------------------------------------- /Example/python_count/py_count.py: -------------------------------------------------------------------------------- 1 | 
#!/usr/bin/python 2 | # coding=utf8 3 | """ 4 | # Author: meetbill 5 | # Created Time : 2019-07-13 12:38:31 6 | 7 | # File Name: py_count.py 8 | # Description: 9 | 10 | #统计代码量,显示离10W行代码还有多远 11 | #递归搜索各个文件夹 12 | #显示各个类型的源文件和源代码数量 13 | #显示总行数与百分比 14 | 15 | """ 16 | 17 | import os 18 | import io 19 | 20 | file_list={} 21 | source_list={} 22 | #查找文件 23 | def _find_file(file_path,target): 24 | os.chdir(file_path) 25 | all_files=os.listdir(os.curdir) 26 | for each in all_files: 27 | fext=os.path.splitext(each)[1] 28 | # fext 是文件后缀,比如 .py 29 | if fext in target: 30 | lines=_calc_code(each) #统计行数 31 | # print("文件%s的代码行数是%d"%(each,lines)) 32 | #统计文件数 33 | try: 34 | file_list[fext]+=1 35 | except KeyError: 36 | file_list[fext]=1 37 | #统计源代码行数 38 | try: 39 | source_list[fext] += lines 40 | #print(source_list[fext]) 41 | except KeyError: 42 | source_list[fext] = lines 43 | #print(source_list[fext]) 44 | if os.path.isdir(each): 45 | _find_file(each,target) # 递归调用 46 | os.chdir(os.pardir) #返回上层目录 47 | 48 | 49 | #统计行数 50 | def _calc_code(file_name): 51 | pwd = os.getcwd() 52 | with io.open(file_name,'r',encoding='utf-8') as f: 53 | # print("正在分析文件%s..."%file_name) 54 | try: 55 | # for eachline in f: 56 | # lines += 1 57 | code_lines = 0 # 代码行数 58 | comment_lines = 0 # 注释行数 59 | blank_lines = 0 # 空白行数 内容为'\n',strip()后为'' 60 | is_comment = False 61 | start_comment_index = 0 # 记录以'''或"""开头的注释位置 62 | tota_lines = 0 # 总行数 63 | for index,line in enumerate(f,start=1): 64 | tota_lines += 1 65 | line = line.strip() #去除开头和结尾的空白符 66 | # 判断多行注释是否已经开始 67 | if not is_comment: 68 | if line.startswith("'''") or line.startswith('"""'): 69 | is_comment = True 70 | start_comment_index = index 71 | #单行注释 72 | elif line.startswith('#'): 73 | comment_lines += 1 74 | #空白行 75 | elif line == '': 76 | blank_lines += 1 77 | #代码行 78 | else: 79 | code_lines += 1 80 | #多行注释已经开始 81 | else: 82 | if line.endswith("'''") or line.endswith('"""'): 83 | is_comment = False 84 | comment_lines += index - 
start_comment_index + 1 85 | else: 86 | pass 87 | 88 | except UnicodeDecodeError: 89 | pass 90 | assert tota_lines == code_lines + blank_lines + comment_lines 91 | print("文件 %s/%s 分析完毕,包含[代码行]:%d [空行]:%d [注释行]:%d [总行]:%d" %(pwd,file_name,code_lines,blank_lines,comment_lines,tota_lines)) 92 | return code_lines 93 | 94 | 95 | #显示结果 96 | def _show_result(start_dir): 97 | lines=0 98 | total=0 99 | text='' 100 | 101 | for i in source_list: 102 | lines=source_list[i] 103 | total+=lines 104 | text+='%s源文件%d个,源代码%d行\n'%(i,file_list[i],lines ) 105 | 106 | title='统计结果' 107 | msg='目前代码行数:%d\n完成进度:%.2f%%\n距离十万行代码还差%d行'%(total,total/1000,100000-total) 108 | print "%s|%s|%s" % (msg,title,text) 109 | 110 | def count(path): 111 | if not os.path.isdir(path): 112 | print "%s is not dir" % path 113 | sys.exit(-1) 114 | target=['.py','.java','.c','.cc','.cpp'] #定义需要查找的源文件类型 115 | _find_file(path,target) 116 | print "##########################################" 117 | _show_result(path) 118 | 119 | if __name__ == "__main__": 120 | import sys, inspect 121 | if len(sys.argv) < 2: 122 | print "Usage:" 123 | for k, v in sorted(globals().items(), key=lambda item: item[0]): 124 | if inspect.isfunction(v) and k[0] != "_": 125 | args, __, __, defaults = inspect.getargspec(v) 126 | if defaults: 127 | print sys.argv[0], k, str(args[:-len(defaults)])[1:-1].replace(",", ""), \ 128 | str(["%s=%s" % (a, b) for a, b in zip(args[-len(defaults):], defaults)])[1:-1].replace(",", "") 129 | else: 130 | print sys.argv[0], k, str(v.func_code.co_varnames[:v.func_code.co_argcount])[1:-1].replace(",", "") 131 | sys.exit(-1) 132 | else: 133 | func = eval(sys.argv[1]) 134 | args = sys.argv[2:] 135 | try: 136 | r = func(*args) 137 | except Exception, e: 138 | print "Usage:" 139 | print "\t", "python %s" % sys.argv[1], str(func.func_code.co_varnames[:func.func_code.co_argcount])[1:-1].replace(",", "") 140 | if func.func_doc: 141 | print "\n".join(["\t\t" + line.strip() for line in func.func_doc.strip().split("\n")]) 
142 | print e 143 | r = -1 144 | import traceback 145 | traceback.print_exc() 146 | if isinstance(r, int): 147 | sys.exit(r) 148 | -------------------------------------------------------------------------------- /Example/python_interactive/README.md: -------------------------------------------------------------------------------- 1 | ## Python 交互式解释器自动补全 2 | 3 | 在使用 Python 解释器的时候由于有太多的内置函数,如果没有自动补全功能会给我们带来很大程度的不便。 4 | 通过在编辑一个文件有以下内容文件名~/.pythonstartup.py 5 | 6 | ``` 7 | import readline, rlcompleter 8 | readline.parse_and_bind("tab: complete") 9 | ``` 10 | 11 | 这样我们再~/.bashrc 文件中添加 12 | export PYTHONSTARTUP=~/.pythonstartup.py 13 | 就可以让以后我们打开 python 交互式解释器的时候可以自动加载上面的语句。 14 | 方便以后的交互式操作。 15 | 16 | 操作方法 17 | ``` 18 | #curl -o pythonstartup.sh https://raw.githubusercontent.com/BillWang139967/MyPythonLib/master/Example/python_interactive/pythonstartup.sh 19 | #sh pythonstartup.sh 20 | #. ~/.bashrc 21 | ``` 22 | 23 | -------------------------------------------------------------------------------- /Example/python_interactive/pythonstartup.sh: -------------------------------------------------------------------------------- 1 | ######################################################################### 2 | # File Name: pythonstartup.sh 3 | # Author: 遇见王斌 4 | # mail: meetbill@163.com 5 | # Created Time: 2016-11-25 16:10:04 6 | ######################################################################### 7 | #!/bin/bash 8 | 9 | echo "import readline, rlcompleter" > ~/.pythonstartup.py 10 | echo 'readline.parse_and_bind("tab: complete")' >> ~/.pythonstartup.py 11 | 12 | 13 | if [[ -e ~/.bashrc ]] 14 | then 15 | CK_BASH=`grep "PYTHONSTARTUP" ~/.bashrc | wc -l` 16 | if [[ "w${CK_BASH}" = "w0" ]] 17 | then 18 | echo " " >> ~/.bashrc 19 | echo "export PYTHONSTARTUP=~/.pythonstartup.py" >> ~/.bashrc 20 | fi 21 | else 22 | echo "export PYTHONSTARTUP=~/.pythonstartup.py" >> ~/.bashrc 23 | fi 24 | . 
~/.bashrc 25 | 26 | echo ":)" 27 | -------------------------------------------------------------------------------- /Example/random_test/README.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: post 3 | title: 随机选择问题和回答者 4 | subtitle: 5 | date: 2016-12-09 10:22:47 6 | category: 7 | author: meetbill 8 | tags: 9 | - 小例子 10 | --- 11 | 12 | 随机选择问题和回答者 13 | 14 | ## 使用 15 | 16 | ``` 17 | #python random_test.py 18 | ``` 19 | -------------------------------------------------------------------------------- /Example/random_test/doc/test.txt: -------------------------------------------------------------------------------- 1 | 问题一XXXXXXXXX 2 | 问题二######### 3 | 问题三********* 4 | 问题四%%%%%%%%% 5 | 6 | -------------------------------------------------------------------------------- /Example/random_test/doc/user.txt: -------------------------------------------------------------------------------- 1 | 张三 2 | 李四 3 | 王五 4 | -------------------------------------------------------------------------------- /Example/random_test/random_test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding=utf-8 3 | """ 4 | # Author: meetbill 5 | # Created Time : 2016-12-08 22:38:08 6 | 7 | # File Name: random_test.py 8 | # Description: 随机选择一道题,并且随机选择一个人进行作答 9 | 10 | """ 11 | import linecache 12 | import random 13 | import sys 14 | reload(sys) 15 | sys.setdefaultencoding('utf-8') 16 | 17 | def test(): 18 | f = [ x.replace('\n','') for x in linecache.getlines('doc/test.txt')] 19 | random_int = random.randint(0, len(f)-1) 20 | return f[random_int] 21 | 22 | def user(): 23 | f = [ x.replace('\n','') for x in linecache.getlines('doc/user.txt')] 24 | random_int = random.randint(0, len(f)-1) 25 | return f[random_int] 26 | 27 | print('嘿嘿,开始选择今天的幸运使者:' ) 28 | print """ 29 | //////////////////////////////////////////////////////////////////// 30 | // _ooOoo_ // 31 | // o8888888o // 32 | // 88" 
. "88 // 33 | // (| -_- |) // 34 | // O\ = /O // 35 | // ____/`---'\____ // 36 | // .' \\| |// `. // 37 | // / \\||| : |||// \ // 38 | // / _||||| -:- |||||- \ // 39 | // | | \\\ - /// | | // 40 | // | \_| ''\---/'' | | // 41 | // \ .-\__ `-` ___/-. / // 42 | // ___`. .' /--.--\ `. . ___ // 43 | // ."" '< `.___\_<|>_/___.' >'"". // 44 | // | | : `- \`.;`\ _ /`;.`/ - ` : | | // 45 | // \ \ `-. \_ __\ /__ _/ .-` / / // 46 | // ========`-.____`-.___\_____/___.-`____.-'======== // 47 | // `=---=' // 48 | // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ // 49 | // 寻找有缘人 // 50 | //////////////////////////////////////////////////////////////////// 51 | 52 | """ 53 | print "题目:",test() 54 | print 55 | print "幸运者:",user() 56 | -------------------------------------------------------------------------------- /Example/store_test/config.ini: -------------------------------------------------------------------------------- 1 | [default] 2 | # Sub-directory numbers 3 | DIR_PATH_NUM = 3 4 | # File numbers in each sub-directory 5 | FILE_NUM = 10 6 | # Path to write file 7 | PATH_STORE = /tmp/test/ 8 | # Path to record logs 9 | PATH_LOG_RECORD = /tmp/record/ 10 | # Max file size 11 | MAX_FIlE_SIZE_MB = 1 12 | -------------------------------------------------------------------------------- /Example/store_test/test_check.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding=utf8 3 | """ 4 | # Author: meetbill 5 | # Created Time : 2017-03-07 22:56:46 6 | 7 | # File Name: test_check.py 8 | # Description: 9 | 10 | """ 11 | 12 | import os 13 | import threading 14 | import hashlib 15 | import ConfigParser 16 | config = ConfigParser.ConfigParser() 17 | config.read('./config.ini') 18 | PATH_STORE = config.get('default','PATH_STORE') 19 | LOG_PATH = config.get('default','PATH_LOG_RECORD') 20 | DIR_PATH_NUM= int(config.get('default', 'DIR_PATH_NUM')) 21 | 22 | 23 | def logfail(data): 24 | 25 | if not os.path.exists(LOG_PATH): 
26 | os.makedirs(os.path.dirname(LOG_PATH)) 27 | 28 | path = LOG_PATH + "failed.log" 29 | 30 | fout = open(path, "a") 31 | fout.write(data + "\n") # it's thread-safe 32 | fout.flush() 33 | 34 | def get_md5(filename): 35 | if not os.path.isfile(filename): 36 | return 37 | myhash = hashlib.md5() 38 | f = file(filename, 'rb') 39 | while True: 40 | b = f.read(8096) 41 | if not b : 42 | break 43 | myhash.update(b) 44 | f.close() 45 | return myhash.hexdigest() 46 | 47 | def check_file(path, file_name): 48 | full_file_name = path + file_name 49 | md5 = get_md5(full_file_name) 50 | if md5 != file_name: 51 | logfail(full_file_name) 52 | print "Check failed:", md5 53 | else: 54 | print "Check pass" 55 | 56 | def ckeck_file_list(file_path): 57 | file_list = os.listdir(file_path) 58 | for file_name in file_list: 59 | if not str(file_name).startswith("."): 60 | check_file(file_path, file_name) 61 | 62 | def check_files_mutilthread(path_store, dir_num): 63 | threads = [] 64 | 65 | for i in xrange(0, dir_num): 66 | dir_index = str(i); 67 | file_path = path_store + "/" + dir_index + "/" 68 | t = threading.Thread(target=ckeck_file_list, args=(file_path,)) 69 | t.start() 70 | threads.append(t) 71 | 72 | for t in threads: 73 | t.join() 74 | 75 | if __name__ == '__main__': 76 | check_files_mutilthread(PATH_STORE, DIR_PATH_NUM) 77 | -------------------------------------------------------------------------------- /Example/store_test/test_create_file.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding=utf8 3 | """ 4 | # Author: meetbill 5 | # Created Time : 2017-03-07 22:59:17 6 | 7 | # File Name: test_create_file.py 8 | # Description: 9 | 10 | """ 11 | import os 12 | import random 13 | import hashlib 14 | import threading 15 | import ConfigParser 16 | 17 | config = ConfigParser.ConfigParser() 18 | config.read('./config.ini') 19 | 20 | DIR_PATH_NUM= int(config.get('default', 'DIR_PATH_NUM')) 21 | FILE_NUM = 
int(config.get('default','FILE_NUM')) 22 | PATH_STORE = config.get('default','PATH_STORE') 23 | PATH_LOG_RECORD = config.get('default','PATH_LOG_RECORD') 24 | MAX_FIlE_SIZE_MB = int(config.get('default','MAX_FIlE_SIZE_MB')) 25 | 26 | def log(path, data, suffix="log"): 27 | fout = open(path + "md5log." + suffix, "a") 28 | fout.write(data) 29 | fout.write("\n") 30 | 31 | def get_md5(filename): 32 | if not os.path.isfile(filename): 33 | return 34 | myhash = hashlib.md5() 35 | f = file(filename, 'rb') 36 | while True: 37 | b = f.read(8096) 38 | if not b : 39 | break 40 | myhash.update(b) 41 | f.close() 42 | return myhash.hexdigest() 43 | 44 | 45 | def write_file(path_store, file_index, dir_index): 46 | 47 | ceshi_path = path_store + "/" + dir_index + "/" 48 | if not os.path.exists(ceshi_path): 49 | os.makedirs(os.path.dirname(ceshi_path)) 50 | 51 | ceshi_full_path = ceshi_path + file_index 52 | 53 | 54 | data = (str(8).zfill(8)) * random.randint(0, 128 * 1024 * MAX_FIlE_SIZE_MB) 55 | 56 | fout = open(ceshi_full_path, "w") 57 | fout.write(data) 58 | fout.flush() 59 | md5_ceshifile = get_md5(ceshi_full_path) 60 | os.rename(os.path.join(ceshi_path, file_index), os.path.join(ceshi_path,md5_ceshifile)) 61 | 62 | log_data = md5_ceshifile 63 | log(PATH_LOG_RECORD, log_data, dir_index) 64 | print dir_index, file_index, log_data 65 | 66 | 67 | def write_files(path_store, file_num, dir_index): 68 | 69 | for i in xrange(0, file_num): 70 | write_file(path_store, str(i), dir_index) 71 | 72 | def writ_files_mutilthread(path_store, file_num): 73 | 74 | if not os.path.exists(PATH_LOG_RECORD): 75 | os.makedirs(os.path.dirname(PATH_LOG_RECORD)) 76 | 77 | threads = [] 78 | 79 | for i in xrange(0, DIR_PATH_NUM): 80 | dir_index = str(i); 81 | t = threading.Thread(target=write_files, args=(path_store, file_num, dir_index)) 82 | t.start() 83 | threads.append(t) 84 | 85 | for t in threads: 86 | t.join() 87 | 88 | if __name__ == '__main__': 89 | writ_files_mutilthread(PATH_STORE, FILE_NUM) 90 
| -------------------------------------------------------------------------------- /My_lib/Example/Agent/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/meetbill/MyPythonLib/fd58b8a112ad366fba67771cafd752f0b8f378ee/My_lib/Example/Agent/__init__.py -------------------------------------------------------------------------------- /My_lib/Example/Agent/agent.py: -------------------------------------------------------------------------------- 1 | #!python 2 | 3 | from Queue import Queue 4 | import time 5 | import socket 6 | import sys, os 7 | import threading 8 | import json 9 | 10 | from netlib.NetBase import nbNet 11 | from netlib.NetUtils import sendData 12 | from monitems import Mon 13 | 14 | class porterThread(threading.Thread): 15 | 16 | def __init__(self, name, queue, interval=None, host=None, port=None): 17 | threading.Thread.__init__(self) 18 | self.name = name 19 | self.queue = queue 20 | self.interval = interval 21 | self.host = host 22 | self.port = port 23 | 24 | def run(self): 25 | if self.name == 'collect': 26 | self.collect_data() 27 | elif self.name == 'senddata': 28 | self.send_data() 29 | elif self.name == 'receivecmd': 30 | self.receivecmd() 31 | elif self.name == 'sendcmdresult': 32 | self.sendcmdresult() 33 | 34 | def collect_data(self): 35 | m = Mon() 36 | atime = int(time.time()) 37 | while 1: 38 | data = m.runAllGet() 39 | self.queue.put(data) 40 | btime = int(time.time()) 41 | time.sleep(self.interval-((btime-atime)%self.interval)) 42 | def send_data(self): 43 | try: 44 | s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 45 | sock_l = [s] 46 | s.connect((self.host, self.port)) 47 | except socket.error as err: 48 | print err 49 | while 1: 50 | print "send data to %s %s" %(self.host, self.port) 51 | if not self.queue.empty(): 52 | data = self.queue.get() 53 | sendData(sock_l, self.host, self.port, json.dumps(data)) 54 | print data, "q size is: ", 
self.queue.qsize() 55 | time.sleep(self.interval) 56 | 57 | def receivecmd(self): 58 | pass 59 | 60 | def sendcmdresult(self): 61 | pass 62 | 63 | 64 | 65 | def startTh(): 66 | queue = Queue(10) 67 | collect = porterThread("collect", queue, interval=30) 68 | collect.start() 69 | time.sleep(0.5) # why must sleep 70 | senddata = porterThread("senddata", queue, interval=30, host="127.0.0.1",port=50003) 71 | senddata.start() 72 | #cmdqueue = Queue(10) 73 | #recvcmd = porterThread("receivecmd", cmdqueue, interval=30, host="0.0.0.0", port=50000) 74 | #recvcmd.start() 75 | #sendcmdresult = porterThread("sendcmdresult", cmdqueue, interval=30,host = '0.0.0.0', port=50002) 76 | #sendcmdresult.start() 77 | 78 | collect.join() 79 | senddata.join() 80 | #recvcmd.join() 81 | #sendcmdresult() 82 | 83 | if __name__ == '__main__': 84 | startTh() 85 | -------------------------------------------------------------------------------- /My_lib/Example/Agent/agent.server: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | #chkconfig: 2345 88 80 3 | #description:agent 4 | #processname:agent 5 | ######################################## 6 | DIR_SOFT=./ 7 | prog=agent 8 | NAME_SCRIPT=run_daemon.py 9 | ######################################## 10 | . /etc/profile 11 | . /etc/rc.d/init.d/functions 12 | 13 | function start(){ 14 | cd ${DIR_SOFT} 15 | if [ ! -x ${NAME_SCRIPT} ] 16 | then 17 | chmod 777 ${NAME_SCRIPT} 18 | fi 19 | python ${NAME_SCRIPT} start 20 | ret=$? 21 | if [ $ret -eq 0 ]; then 22 | action $"Starting $prog: " /bin/true 23 | else 24 | action $"Starting $prog: " /bin/false 25 | fi 26 | return 0 27 | } 28 | 29 | 30 | function stop(){ 31 | cd ${DIR_SOFT} 32 | if [ ! -x ${NAME_SCRIPT} ] 33 | then 34 | chmod 777 ${NAME_SCRIPT} 35 | fi 36 | 37 | python ${NAME_SCRIPT} stop 38 | ret=$? 
39 | if [ $ret -eq 0 ]; then 40 | action $"Stopping $prog: " /bin/true 41 | else 42 | action $"Stopping $prog: " /bin/false 43 | fi 44 | 45 | } 46 | 47 | restart(){ 48 | cd ${DIR_SOFT} 49 | python ${NAME_SCRIPT} restart 50 | } 51 | 52 | # See how we were called. 53 | case "$1" in 54 | start) 55 | start 56 | ;; 57 | stop) 58 | stop 59 | ;; 60 | status) 61 | status 62 | ;; 63 | restart) 64 | restart 65 | ;; 66 | *) 67 | echo $"Usage: $0 {start|stop|status}" 68 | exit 2 69 | esac 70 | 71 | exit $? 72 | -------------------------------------------------------------------------------- /My_lib/Example/Agent/daemon.py: -------------------------------------------------------------------------------- 1 | #-*- coding:utf-8 -*- 2 | import sys, os, time, atexit 3 | from signal import SIGTERM 4 | 5 | # 1.第一次fork将会创建父-子进程,同时使得父进程退出保证守护进程能够运行在后台。 6 | # 2.通过setsid步骤使得进程与控制终端、登录会话以及进程组脱离。 7 | # 3.第二次fork将确保进程重新打开控制终端,并且产生子-孙进程,而子进程退出后孙进程将成为真正的守护进程。 8 | # 4.其他还有一些诸如工作目录设置、关闭文件描述符、设置文件创建掩码之类的操作。 9 | 10 | class Daemon(object): 11 | """ 12 | A generic daemon class. 
13 | 14 | Usage: subclass the Daemon class and override the run() method 15 | """ 16 | def __init__(self, pidfile='/tmp/nbMon.pid', stdin='/dev/null', stdout='nbMon.log', stderr='nbMon.log'): 17 | self.stdin = stdin 18 | self.stdout = stdout 19 | self.stderr = stderr 20 | self.pidfile = pidfile 21 | 22 | def daemonize(self): 23 | """ 24 | do the UNIX double-fork magic, see Stevens' "Advanced 25 | Programming in the UNIX Environment" for details (ISBN 0201563177) 26 | http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 27 | """ 28 | try: 29 | pid = os.fork() 30 | if pid > 0: 31 | # exit first parent 32 | sys.exit(0) 33 | except OSError, e: 34 | sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror)) 35 | sys.exit(1) 36 | 37 | # decouple from parent environment 38 | os.chdir("/") 39 | os.setsid() 40 | os.umask(0) 41 | 42 | # do second fork 43 | try: 44 | pid = os.fork() 45 | if pid > 0: 46 | # exit from second parent 47 | sys.exit(0) 48 | except OSError, e: 49 | sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror)) 50 | sys.exit(1) 51 | 52 | # redirect standard file descriptors 53 | sys.stdout.flush() 54 | sys.stderr.flush() 55 | si = file(self.stdin, 'r') 56 | so = file(self.stdout, 'a+') 57 | se = file(self.stderr, 'a+', 0) 58 | os.dup2(si.fileno(), sys.stdin.fileno()) 59 | os.dup2(so.fileno(), sys.stdout.fileno()) 60 | os.dup2(se.fileno(), sys.stderr.fileno()) 61 | 62 | # write pidfile 63 | atexit.register(self.delpid) 64 | pid = str(os.getpid()) 65 | file(self.pidfile,'w+').write("%s\n" % pid) 66 | 67 | def delpid(self): 68 | os.remove(self.pidfile) 69 | 70 | def start(self): 71 | """ 72 | Start the daemon 73 | """ 74 | # Check for a pidfile to see if the daemon already runs 75 | try: 76 | pf = file(self.pidfile,'r') 77 | pid = int(pf.read().strip()) 78 | pf.close() 79 | except IOError: 80 | pid = None 81 | 82 | if pid: 83 | message = "pidfile %s already exist. 
Daemon already running?\n" 84 | sys.stderr.write(message % self.pidfile) 85 | sys.exit(1) 86 | 87 | # Start the daemon 88 | self.daemonize() 89 | self.run() 90 | 91 | def stop(self): 92 | """ 93 | Stop the daemon 94 | """ 95 | # Get the pid from the pidfile 96 | try: 97 | pf = file(self.pidfile,'r') 98 | pid = int(pf.read().strip()) 99 | pf.close() 100 | except IOError: 101 | pid = None 102 | 103 | if not pid: 104 | message = "pidfile %s does not exist. Daemon not running?\n" 105 | sys.stderr.write(message % self.pidfile) 106 | return # not an error in a restart 107 | 108 | # Try killing the daemon process 109 | try: 110 | while 1: 111 | os.kill(pid, SIGTERM) 112 | time.sleep(0.1) 113 | except OSError, err: 114 | err = str(err) 115 | if err.find("No such process") > 0: 116 | if os.path.exists(self.pidfile): 117 | os.remove(self.pidfile) 118 | else: 119 | print str(err) 120 | sys.exit(1) 121 | 122 | def restart(self): 123 | """ 124 | Restart the daemon 125 | """ 126 | self.stop() 127 | self.start() 128 | 129 | def run(self): 130 | """ 131 | You should override this method when you subclass Daemon. It will be called after the process has been 132 | daemonized by start() or restart(). 
133 | """ 134 | -------------------------------------------------------------------------------- /My_lib/Example/Agent/monitems.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | #-*- coding:utf-8 -*- 3 | import inspect 4 | import os, socket, time 5 | 6 | class Mon: 7 | def __init__(self): 8 | self.data = {} 9 | 10 | def getLoadAvg(self): 11 | with open("/proc/loadavg") as f: 12 | a = f.read().split()[:3] 13 | return float(a[0]) 14 | 15 | def getMemTotal(self): 16 | with open("/proc/meminfo") as f: 17 | a = int(f.readline().split()[1]) 18 | return a/1024 19 | 20 | def getMemUsage(self, noBufferCache=True): 21 | if noBufferCache: 22 | with open("/proc/meminfo") as f: 23 | T = int(f.readline().split()[1]) 24 | F = int(f.readline().split()[1]) 25 | B = int(f.readline().split()[1]) 26 | C = int(f.readline().split()[1]) 27 | return (T-F-B-C)/1024 28 | else: 29 | with open("/proc/meminfo") as f: 30 | a = int(f.readline().split()[1]) - int(f.readline().split()[1]) 31 | return a/1024 32 | 33 | def getMemFree(self, noBufferCache=True): 34 | if noBufferCache: 35 | with open("/proc/meminfo") as f: 36 | T = int(f.readline().split()[1]) 37 | F = int(f.readline().split()[1]) 38 | B = int(f.readline().split()[1]) 39 | C = int(f.readline().split()[1]) 40 | return (F+B+C)/1024 41 | else: 42 | with open("/proc/meminfo") as f: 43 | f.readline() 44 | a = int(f.readline().split()[1]) 45 | return a/1024 46 | 47 | def getHost(self): 48 | return socket.gethostname() 49 | 50 | def getTime(self): 51 | return int(time.time()) 52 | 53 | def runAllGet(self): 54 | for fun in inspect.getmembers(self, predicate=inspect.ismethod): 55 | if fun[0][:3] == "get": 56 | self.data[fun[0][3:]] = fun[1]() 57 | return self.data 58 | 59 | if __name__ == "__main__": 60 | print Mon().runAllGet() 61 | print Mon().getMemTotal() 62 | 63 | -------------------------------------------------------------------------------- 
/My_lib/Example/Agent/monitor.py: -------------------------------------------------------------------------------- 1 | #-*- coding:utf-8 -*- 2 | from netlib.daemon import Daemon 3 | from netlib.NetBase import nbNet 4 | from netlib.NetUtils import sendData 5 | import json 6 | 7 | 8 | class Monitor(Daemon): 9 | #��ȡ����� 10 | def __init__(self, host, port,sock_l=None): 11 | self.items = ['cpuused', 'memtotal', 'memused'] 12 | self.logic = None 13 | self.sock_l[0] = sock_l 14 | self.host = host 15 | self.port = port 16 | 17 | 18 | #���ռ�����ȡ������� 19 | def readData(self): 20 | data = {} 21 | for item in self.items: 22 | data[item] = self.getData(item) 23 | return json.dumps(data) 24 | 25 | #���ͼ�����ݵ�saver�� saver�����͸����ݿ���澯ģ�� 26 | def sendMonitorData(self): 27 | data = self.readData() 28 | sendData(self.sock_l, self.host, self.port, data) 29 | 30 | def getData(self, item): 31 | value = 10 32 | return value 33 | 34 | def run(self): 35 | self.sendMonitorData() 36 | 37 | 38 | if __name__ == '__main__': 39 | monitor_agent = Monitor('0.0.0.0', 9076) 40 | monitor_agent.run() 41 | 42 | 43 | 44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /My_lib/Example/Agent/netlib/NetBase.py: -------------------------------------------------------------------------------- 1 | #-*- coding:utf-8 -*- 2 | from daemon import Daemon 3 | import socket 4 | import select 5 | import time 6 | 7 | from NetUtils import dbgPrint 8 | 9 | __all__ = ["nbNet", "sendData_mh"] 10 | 11 | #DEBUG = True 12 | from NetUtils import * 13 | 14 | class nbNetBase: 15 | def setFd(self, sock): 16 | """sock is class object of socket""" 17 | dbgPrint("\n -- setFd start!") 18 | _state = STATE() 19 | _state.sock_obj = sock 20 | self.conn_state[sock.fileno()] = _state 21 | self.conn_state[sock.fileno()].printState() 22 | dbgPrint("\n -- setFd End!") 23 | 24 | def accept(self, fd): 25 | dbgPrint("\n -- start Accept function") 26 | _sock_state = self.conn_state[fd] 
27 | _sock = _sock_state.sock_obj 28 | conn, addr = _sock.accept() 29 | conn.setblocking(0) 30 | return conn 31 | 32 | 33 | def close(self, fd): 34 | try: 35 | sock = self.conn_state[fd].sock_obj 36 | sock.close() 37 | self.epoll_sock.unregister(fd) 38 | self.conn_state.pop(fd) 39 | except: 40 | dbgPrint("Close fd: %s abnormal" % fd) 41 | pass 42 | 43 | def read(self,fd): 44 | try: 45 | sock_state = self.conn_state[fd] 46 | conn = sock_state.sock_obj 47 | if sock_state.need_read <= 0: 48 | raise socket.error 49 | one_read = conn.recv(sock_state.need_read).lstrip() 50 | dbgPrint("\tread func fd %d, one_read: %s, need_read: %d" %(fd, one_read, sock_state.need_read)) 51 | if len(one_read) == 0: 52 | raise socket.error 53 | sock_state.buff_read += one_read 54 | sock_state.have_read += len(one_read) 55 | sock_state.need_read -= len(one_read) 56 | sock_state.printState() 57 | 58 | if sock_state.have_read == 10: 59 | header_said_need_read = int(sock_state.buff_read) 60 | print "header_said_need_read %d" % header_said_need_read 61 | if header_said_need_read <= 0: 62 | raise socket.error 63 | sock_state.need_read += header_said_need_read 64 | sock_state.buff_read="" 65 | sock_state.printState() 66 | return "readcontent" 67 | elif sock_state.need_read == 0: 68 | return "process" 69 | else: 70 | return "readmore" 71 | except (socket.error, ValueError) , msg: 72 | try: 73 | if msg.error == 11: 74 | dbgPrint("11 " + msg) 75 | return "retry" 76 | except: 77 | pass 78 | return 'closing' 79 | 80 | 81 | def write(self, fd): 82 | sock_state = self.conn_state[fd] 83 | conn = sock_state.sock_obj 84 | last_have_send = sock_state.have_write 85 | try: 86 | have_send = conn.send(sock_state.buff_write[last_have_send:]) 87 | sock_state.have_write += have_send 88 | sock_state.need_write -= have_send 89 | if sock_state.need_write == 0 and sock_state.have_write != 0: 90 | conn.send("0000000002OK") #此处真坑爹啊! 
调试一天发现的问题 91 | dbgPrint("\n write data completed!") 92 | return "writecomplete" 93 | else: 94 | return "writemore" 95 | except socket.error, msg: 96 | return "closing" 97 | 98 | def run(self): 99 | while True: 100 | dbgPrint("\n -- run func loop") 101 | for i in self.conn_state.iterkeys(): 102 | dbgPrint("\n -- state of fd: %d" % i) 103 | self.conn_state[i].printState(); 104 | 105 | epoll_list = self.epoll_sock.poll() 106 | for fd, events in epoll_list: 107 | dbgPrint("\n-- run epoll return fd: %d, event: %s" %(fd, events)) 108 | sock_state = self.conn_state[fd] 109 | if select.EPOLLHUP & events: 110 | dbgPrint("events EPOLLHUP") 111 | sock_state.state = "closing" 112 | elif select.EPOLLERR & events: 113 | dbgPrint("EPOLLERROR") 114 | sock_state.state = "closing" 115 | 116 | self.state_machine(fd) 117 | 118 | def state_machine(self, fd): 119 | dbgPrint("\n-- state machine: fd %d, statue is: %s" %(fd, self.conn_state[fd].state)) 120 | sock_state = self.conn_state[fd] 121 | self.sm[sock_state.state](fd) 122 | 123 | class nbNet(nbNetBase): 124 | def __init__(self, addr, port, logic): 125 | dbgPrint("\n-- __init__: start!") 126 | self.conn_state = {} 127 | self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) 128 | self.listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 129 | self.listen_sock.bind((addr, port)) 130 | self.listen_sock.listen(10) 131 | self.setFd(self.listen_sock) 132 | self.epoll_sock = select.epoll() 133 | self.epoll_sock.register(self.listen_sock.fileno(), select.EPOLLIN) 134 | self.logic = logic 135 | self.sm = { 136 | 'accept': self.accept2read, 137 | "read": self.read2process, 138 | "write":self.write2read, 139 | "process": self.process, 140 | "closing": self.close, 141 | } 142 | 143 | def process(self, fd): 144 | sock_state = self.conn_state[fd] 145 | response = self.logic(sock_state.buff_read) 146 | sock_state.buff_write = "%010d%s" %(len(response), response) 147 | dbgPrint("%010d%s" %(len(response), response)) 
148 | sock_state.need_write = len(sock_state.buff_write) 149 | sock_state.state = "write" 150 | self.epoll_sock.modify(fd, select.EPOLLOUT) 151 | sock_state.printState() 152 | 153 | def accept2read(self, fd): 154 | conn = self.accept(fd) 155 | self.epoll_sock.register(conn.fileno(), select.EPOLLIN) 156 | self.setFd(conn) 157 | self.conn_state[conn.fileno()].state = "read" 158 | 159 | def read2process(self, fd): 160 | read_ret = "" 161 | try: 162 | read_ret = self.read(fd) 163 | except Exception as msg: 164 | dbgPrint(msg) 165 | read_ret = "closing" 166 | 167 | if read_ret == "process": 168 | self.process(fd) 169 | elif read_ret == "readcontent": 170 | pass 171 | elif read_ret == "readmore": 172 | pass 173 | elif read_ret == "retry": 174 | pass 175 | elif read_ret == "closing": 176 | self.conn_state[fd].state = "closing" 177 | self.state_machine(fd) 178 | else: 179 | raise Exception("impossible state returned by self.read") 180 | 181 | def write2read(self, fd): 182 | try: 183 | write_ret = self.write(fd) 184 | except socket.error, msg: 185 | write_ret = "closing" 186 | 187 | if write_ret == "writemore": 188 | pass 189 | elif write_ret == "writecomplete": 190 | sock_state = self.conn_state[fd] 191 | conn = sock_state.sock_obj 192 | self.setFd(conn) 193 | self.conn_state[fd].state = "read" 194 | self.epoll_sock.modify(fd, select.EPOLLIN) 195 | elif write_ret == "closing": 196 | dbgPrint(msg) 197 | self.conn_state[fd].state = "closing" 198 | self.state_machine(fd) 199 | 200 | 201 | if __name__ == '__main__': 202 | def logic(d_in): 203 | return d_in[::-1] 204 | 205 | serverD = nbNet('0.0.0.0', 9076, logic) 206 | serverD.run() 207 | -------------------------------------------------------------------------------- /My_lib/Example/Agent/netlib/NetUtils.py: -------------------------------------------------------------------------------- 1 | #-*- coding:utf-8 -*- 2 | from inspect import currentframe 3 | import socket 4 | import select 5 | import time 6 | 7 | DEBUG = False 8 | 
9 | def get_linenumber(): 10 | cf = currentframe() 11 | return str(cf.f_back.f_back.f_lineno) 12 | 13 | def dbgPrint(msg): 14 | if DEBUG: 15 | print get_linenumber(), msg 16 | 17 | import signal, functools 18 | 19 | class TimeoutError(Exception):pass 20 | 21 | def timeout(seconds, error_message="function call time out"): 22 | 23 | def decorated(func): 24 | def _handle_timeout(signum, frame): 25 | raise TimeoutError(error_message); 26 | def wrapper(*args, **kwargs): 27 | signal.signal(signal.SIGALRM, _handle_timeout) 28 | signal.alarm(seconds) 29 | try: 30 | result = func(*args, **kwargs) 31 | finally: 32 | signal.alarm(10) 33 | return result 34 | return functools.wraps(func)(wrapper) 35 | return decorated 36 | 37 | 38 | @timeout(5) 39 | def connect_timeout(socket, host_port): 40 | return socket.connect(host_port) 41 | 42 | def sendData_mh(sock_list, host_list, data, single_host_retry=3): 43 | """ 44 | saver_list = [host1:port, host2:port, host3:port] 45 | sock_list = [some socket] 46 | """ 47 | done = False 48 | for host_port in host_list: 49 | if done: 50 | break 51 | host, port = host_port.split(":") 52 | port = int(port) 53 | retry = 0 54 | while retry < single_host_retry: 55 | try: 56 | if sock_list[0] == None: 57 | sock_list[0] = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 58 | sock_list[0].settimeout(5) 59 | sock_list[0].connect((host, port)) 60 | d = data 61 | sock_list[0].sendall("%010d%s" % (len(d), d)) 62 | count = sock_list[0].recv(10) 63 | if not count: 64 | raise Exception("recv error") 65 | count = int(count) 66 | buf = sock_list[0].recv(count) 67 | if buf[:2] == "OK": 68 | retry = 0 69 | break 70 | 71 | except: 72 | sock_list[0].close() 73 | sock_list[0] = None 74 | retry += 1 75 | 76 | def sendData(sock_l, host, port, data): 77 | retry = 0 78 | while retry < 3: 79 | try: 80 | if sock_l[0] == None: 81 | sock_l[0] = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 82 | sock_l[0].connect((host, port)) 83 | dbgPrint("\n-- start connect %s:%d" 
%(host, port)) 84 | d = data 85 | sock_l[0].sendall("%010d%s" %(len(data), data)) 86 | count = sock_l[0].recv(10) 87 | if not count: 88 | raise Exception("recv error") 89 | buf = sock_l[0].recv(int(count)) 90 | dbgPrint("recv data: %s" % buf) 91 | if buf[:2] == "OK": 92 | retry = 0 93 | break 94 | except: 95 | sock_l[0].close() 96 | sock_l[0] = None 97 | retry += 1 98 | 99 | # initial status for state machine 100 | class STATE: 101 | def __init__(self): 102 | self.state = "accept" 103 | self.have_read = 0 104 | self.need_read = 10 105 | self.have_write = 0 106 | self.need_write = 0 107 | self.buff_write = "" 108 | self.buff_read = "" 109 | # sock_obj is a object 110 | self.sock_obj = "" 111 | 112 | def printState(self): 113 | if DEBUG: 114 | dbgPrint('\n - current state of fd: %d' % self.sock_obj.fileno()) 115 | dbgPrint(" - - state: %s" % self.state) 116 | dbgPrint(" - - have_read: %s" % self.have_read) 117 | dbgPrint(" - - need_read: %s" % self.need_read) 118 | dbgPrint(" - - have_write: %s" % self.have_write) 119 | dbgPrint(" - - need_write: %s" % self.need_write) 120 | dbgPrint(" - - buff_write: %s" % self.buff_write) 121 | dbgPrint(" - - buff_read: %s" % self.buff_read) 122 | dbgPrint(" - - sock_obj: %s" % self.sock_obj) 123 | -------------------------------------------------------------------------------- /My_lib/Example/Agent/netlib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/meetbill/MyPythonLib/fd58b8a112ad366fba67771cafd752f0b8f378ee/My_lib/Example/Agent/netlib/__init__.py -------------------------------------------------------------------------------- /My_lib/Example/Agent/netlib/daemon.py: -------------------------------------------------------------------------------- 1 | #-*- coding:utf-8 -*- 2 | import sys, os, time, atexit 3 | from signal import SIGTERM 4 | 5 | class Daemon(object): 6 | """ 7 | A generic daemon class. 
8 | 9 | Usage: subclass the Daemon class and override the run() method 10 | """ 11 | def __init__(self, pidfile='nbMon.pid', stdin='/dev/null', stdout='nbMon.log', stderr='nbMon.log'): 12 | self.stdin = stdin 13 | self.stdout = stdout 14 | self.stderr = stderr 15 | self.pidfile = pidfile 16 | 17 | def daemonize(self): 18 | """ 19 | do the UNIX double-fork magic, see Stevens' "Advanced 20 | Programming in the UNIX Environment" for details (ISBN 0201563177) 21 | http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 22 | """ 23 | try: 24 | pid = os.fork() 25 | if pid > 0: 26 | # exit first parent 27 | sys.exit(0) 28 | except OSError, e: 29 | sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror)) 30 | sys.exit(1) 31 | 32 | # decouple from parent environment 33 | #os.chdir("/") 34 | os.setsid() 35 | os.umask(0) 36 | 37 | # do second fork 38 | try: 39 | pid = os.fork() 40 | if pid > 0: 41 | # exit from second parent 42 | sys.exit(0) 43 | except OSError, e: 44 | sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror)) 45 | sys.exit(1) 46 | 47 | # redirect standard file descriptors 48 | sys.stdout.flush() 49 | sys.stderr.flush() 50 | si = file(self.stdin, 'r') 51 | so = file(self.stdout, 'a+') 52 | se = file(self.stderr, 'a+', 0) 53 | os.dup2(si.fileno(), sys.stdin.fileno()) 54 | os.dup2(so.fileno(), sys.stdout.fileno()) 55 | os.dup2(se.fileno(), sys.stderr.fileno()) 56 | 57 | # write pidfile 58 | atexit.register(self.delpid) 59 | pid = str(os.getpid()) 60 | file(self.pidfile,'w+').write("%s\n" % pid) 61 | 62 | def delpid(self): 63 | os.remove(self.pidfile) 64 | 65 | def start(self): 66 | """ 67 | Start the daemon 68 | """ 69 | # Check for a pidfile to see if the daemon already runs 70 | try: 71 | pf = file(self.pidfile,'r') 72 | pid = int(pf.read().strip()) 73 | pf.close() 74 | except IOError: 75 | pid = None 76 | 77 | if pid: 78 | message = "pidfile %s already exist. 
Daemon already running?\n" 79 | sys.stderr.write(message % self.pidfile) 80 | sys.exit(1) 81 | 82 | # Start the daemon 83 | self.daemonize() 84 | self.run() 85 | 86 | def stop(self): 87 | """ 88 | Stop the daemon 89 | """ 90 | # Get the pid from the pidfile 91 | try: 92 | pf = file(self.pidfile,'r') 93 | pid = int(pf.read().strip()) 94 | pf.close() 95 | except IOError: 96 | pid = None 97 | 98 | if not pid: 99 | message = "pidfile %s does not exist. Daemon not running?\n" 100 | sys.stderr.write(message % self.pidfile) 101 | return # not an error in a restart 102 | 103 | # Try killing the daemon process 104 | try: 105 | while 1: 106 | os.kill(pid, SIGTERM) 107 | time.sleep(0.1) 108 | except OSError, err: 109 | err = str(err) 110 | if err.find("No such process") > 0: 111 | if os.path.exists(self.pidfile): 112 | os.remove(self.pidfile) 113 | else: 114 | print str(err) 115 | sys.exit(1) 116 | 117 | def restart(self): 118 | """ 119 | Restart the daemon 120 | """ 121 | self.stop() 122 | self.start() 123 | 124 | def run(self): 125 | """ 126 | You should override this method when you subclass Daemon. It will be called after the process has been 127 | daemonized by start() or restart(). 
128 | """ 129 | 130 | 131 | -------------------------------------------------------------------------------- /My_lib/Example/Agent/run_daemon.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | import Queue 3 | import threading 4 | import sys, time 5 | import urllib2 6 | import json 7 | import agent 8 | from daemon import Daemon 9 | 10 | class MyDaemon(Daemon): 11 | def run(self): 12 | agent.startTh() 13 | 14 | if __name__ == "__main__": 15 | daemon = MyDaemon('/var/run/agent.pid') 16 | if len(sys.argv) == 2: 17 | if 'start' == sys.argv[1]: 18 | daemon.start() 19 | elif 'stop' == sys.argv[1]: 20 | daemon.stop() 21 | elif 'restart' == sys.argv[1]: 22 | daemon.restart() 23 | else: 24 | print "Unknown command" 25 | sys.exit(2) 26 | sys.exit(0) 27 | else: 28 | print "usage: %s start|stop|restart" % sys.argv[0] 29 | sys.exit(2) 30 | 31 | 32 | -------------------------------------------------------------------------------- /My_lib/Example/README.md: -------------------------------------------------------------------------------- 1 | # Agent 2 | ``` 3 | cd ./Agent/ 4 | ./agent.server start 5 | ``` 6 | agent默认发送端口为50003 7 | 8 | # Server 9 | ``` 10 | cd ./saver/ 11 | python SaverDB.py 12 | ``` 13 | server 默认监听端口为50003,并将数据保存到数据库中 14 | 数据库密码设置的为123456,会自动创建数据库和表 15 | -------------------------------------------------------------------------------- /My_lib/Example/saver/SaverDB.py: -------------------------------------------------------------------------------- 1 | import sys, os 2 | import MySQLdb as mysql 3 | import json 4 | import hashlib 5 | from netlib.NetBase import nbNet 6 | 7 | monTables = [ 8 | 'stat_0', 9 | 'stat_1', 10 | 'stat_2', 11 | 'stat_3', 12 | ] 13 | 14 | db = mysql.connect(user="root", passwd="123456",charset="utf8") 15 | cur = db.cursor() 16 | #db.autocommit(True) 17 | try: 18 | cur.execute('create database dbtest') 19 | except: 20 | print 'Database dbtest exists!' 
21 | 22 | db.select_db('dbtest') 23 | try: 24 | for i in range(4): 25 | sql = "CREATE TABLE `stat_%d` (`host` char(20),`mem_free` \ 26 | int(10),`mem_usage` int(10),`mem_total` int(10),`load_avg` \ 27 | char(20),`time` int(20))" %i 28 | cur.execute(sql) 29 | except: 30 | print 'TABLE exists' 31 | 32 | def fnvhash(string): 33 | ret = 97 34 | for i in string: 35 | ret = ret ^ ord(i) * 13 36 | return ret 37 | 38 | def insertMonData(mondata): 39 | try: 40 | data = json.loads(mondata) 41 | timeOfData = int(data['Time']) 42 | hostIndex = monTables[fnvhash(data['Host']) % len(monTables)] 43 | sql = "insert into `%s` (`host`,`mem_free`,`mem_usage`,`mem_total`,`load_avg`,`time`) VALUES('%s', '%d', '%d', '%d', '%s', '%d')" % \ 44 | (hostIndex, data['Host'], data['MemFree'], data['MemUsage'], data['MemTotal'], data['LoadAvg'], timeOfData) 45 | ret = cur.execute(sql) 46 | except mysql.IntegrityError: 47 | pass 48 | 49 | def logic(d_in): 50 | insertMonData(d_in) 51 | return "OK" 52 | def start(): 53 | saverD = nbNet('0.0.0.0', 50003, logic) 54 | saverD.run() 55 | 56 | if __name__ == "__main__": 57 | start() 58 | 59 | 60 | 61 | 62 | -------------------------------------------------------------------------------- /My_lib/Example/saver/netlib/.gitignore: -------------------------------------------------------------------------------- 1 | /Power.py 2 | -------------------------------------------------------------------------------- /My_lib/Example/saver/netlib/NetUtils.py: -------------------------------------------------------------------------------- 1 | #-*- coding:utf-8 -*- 2 | from inspect import currentframe 3 | import socket 4 | import select 5 | import time 6 | 7 | DEBUG = False 8 | 9 | #{{{get_linenumber 10 | def get_linenumber(): 11 | cf = currentframe() 12 | return str(cf.f_back.f_back.f_lineno) 13 | #}}} 14 | 15 | #{{{dbgPrint 16 | def dbgPrint(msg): 17 | if DEBUG: 18 | print get_linenumber(), msg 19 | #}}} 20 | 21 | import signal, functools 22 | 23 | class 
TimeoutError(Exception):pass 24 | 25 | #{{{timeout 26 | def timeout(seconds, error_message="function call time out"): 27 | 28 | def decorated(func): 29 | def _handle_timeout(signum, frame): 30 | raise TimeoutError(error_message); 31 | def wrapper(*args, **kwargs): 32 | signal.signal(signal.SIGALRM, _handle_timeout) 33 | signal.alarm(seconds) 34 | try: 35 | result = func(*args, **kwargs) 36 | finally: 37 | signal.alarm(10) 38 | return result 39 | return functools.wraps(func)(wrapper) 40 | return decorated 41 | #}}} 42 | 43 | @timeout(5) 44 | #{{{connect_timeout 45 | def connect_timeout(socket, host_port): 46 | return socket.connect(host_port) 47 | #}}} 48 | #{{{sendData_mh 49 | def sendData_mh(sock_list, host_list, data, single_host_retry=3): 50 | """ 51 | saver_list = [host1:port, host2:port, host3:port] 52 | sock_list = [some socket] 53 | """ 54 | done = False 55 | for host_port in host_list: 56 | if done: 57 | break 58 | host, port = host_port.split(":") 59 | port = int(port) 60 | retry = 0 61 | while retry < single_host_retry: 62 | try: 63 | if sock_list[0] == None: 64 | sock_list[0] = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 65 | sock_list[0].settimeout(5) 66 | sock_list[0].connect((host, port)) 67 | d = data 68 | sock_list[0].sendall("%010d%s" % (len(d), d)) 69 | count = sock_list[0].recv(10) 70 | if not count: 71 | raise Exception("recv error") 72 | count = int(count) 73 | buf = sock_list[0].recv(count) 74 | if buf[:2] == "OK": 75 | retry = 0 76 | break 77 | 78 | except: 79 | sock_list[0].close() 80 | sock_list[0] = None 81 | retry += 1 82 | #}}} 83 | #{{{sendData 84 | def sendData(sock_l, host, port, data): 85 | retry = 0 86 | while retry < 3: 87 | try: 88 | if sock_l[0] == None: 89 | sock_l[0] = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 90 | sock_l[0].connect((host, port)) 91 | dbgPrint("\n-- start connect %s:%d" %(host, port)) 92 | d = data 93 | sock_l[0].sendall("%010d%s" %(len(data), data)) 94 | count = sock_l[0].recv(10) 95 | if not 
count: 96 | raise Exception("recv error") 97 | buf = sock_l[0].recv(int(count)) 98 | dbgPrint("recv data: %s" % buf) 99 | if buf[:2] == "OK": 100 | retry = 0 101 | break 102 | except: 103 | sock_l[0].close() 104 | sock_l[0] = None 105 | retry += 1 106 | #}}} 107 | # initial status for state machine 108 | #{{{STATE 109 | class STATE: 110 | def __init__(self): 111 | self.state = "accept" 112 | self.have_read = 0 113 | self.need_read = 10 114 | self.have_write = 0 115 | self.need_write = 0 116 | self.buff_write = "" 117 | self.buff_read = "" 118 | # sock_obj is a object 119 | self.sock_obj = "" 120 | 121 | def printState(self): 122 | if DEBUG: 123 | dbgPrint('\n - current state of fd: %d' % self.sock_obj.fileno()) 124 | dbgPrint(" - - state: %s" % self.state) 125 | dbgPrint(" - - have_read: %s" % self.have_read) 126 | dbgPrint(" - - need_read: %s" % self.need_read) 127 | dbgPrint(" - - have_write: %s" % self.have_write) 128 | dbgPrint(" - - need_write: %s" % self.need_write) 129 | dbgPrint(" - - buff_write: %s" % self.buff_write) 130 | dbgPrint(" - - buff_read: %s" % self.buff_read) 131 | dbgPrint(" - - sock_obj: %s" % self.sock_obj) 132 | #}}} 133 | -------------------------------------------------------------------------------- /My_lib/Example/saver/netlib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/meetbill/MyPythonLib/fd58b8a112ad366fba67771cafd752f0b8f378ee/My_lib/Example/saver/netlib/__init__.py -------------------------------------------------------------------------------- /My_lib/Example/saver/netlib/daemon.py: -------------------------------------------------------------------------------- 1 | #-*- coding:utf-8 -*- 2 | import sys, os, time, atexit 3 | from signal import SIGTERM 4 | 5 | class Daemon(object): 6 | """ 7 | A generic daemon class. 
8 | 9 | Usage: subclass the Daemon class and override the run() method 10 | """ 11 | def __init__(self, pidfile='nbMon.pid', stdin='/dev/null', stdout='nbMon.log', stderr='nbMon.log'): 12 | self.stdin = stdin 13 | self.stdout = stdout 14 | self.stderr = stderr 15 | self.pidfile = pidfile 16 | 17 | def daemonize(self): 18 | """ 19 | do the UNIX double-fork magic, see Stevens' "Advanced 20 | Programming in the UNIX Environment" for details (ISBN 0201563177) 21 | http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 22 | """ 23 | try: 24 | pid = os.fork() 25 | if pid > 0: 26 | # exit first parent 27 | sys.exit(0) 28 | except OSError, e: 29 | sys.stderr.write("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror)) 30 | sys.exit(1) 31 | 32 | # decouple from parent environment 33 | #os.chdir("/") 34 | os.setsid() 35 | os.umask(0) 36 | 37 | # do second fork 38 | try: 39 | pid = os.fork() 40 | if pid > 0: 41 | # exit from second parent 42 | sys.exit(0) 43 | except OSError, e: 44 | sys.stderr.write("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror)) 45 | sys.exit(1) 46 | 47 | # redirect standard file descriptors 48 | sys.stdout.flush() 49 | sys.stderr.flush() 50 | si = file(self.stdin, 'r') 51 | so = file(self.stdout, 'a+') 52 | se = file(self.stderr, 'a+', 0) 53 | os.dup2(si.fileno(), sys.stdin.fileno()) 54 | os.dup2(so.fileno(), sys.stdout.fileno()) 55 | os.dup2(se.fileno(), sys.stderr.fileno()) 56 | 57 | # write pidfile 58 | atexit.register(self.delpid) 59 | pid = str(os.getpid()) 60 | file(self.pidfile,'w+').write("%s\n" % pid) 61 | 62 | def delpid(self): 63 | os.remove(self.pidfile) 64 | 65 | def start(self): 66 | """ 67 | Start the daemon 68 | """ 69 | # Check for a pidfile to see if the daemon already runs 70 | try: 71 | pf = file(self.pidfile,'r') 72 | pid = int(pf.read().strip()) 73 | pf.close() 74 | except IOError: 75 | pid = None 76 | 77 | if pid: 78 | message = "pidfile %s already exist. 
Daemon already running?\n" 79 | sys.stderr.write(message % self.pidfile) 80 | sys.exit(1) 81 | 82 | # Start the daemon 83 | self.daemonize() 84 | self.run() 85 | 86 | def stop(self): 87 | """ 88 | Stop the daemon 89 | """ 90 | # Get the pid from the pidfile 91 | try: 92 | pf = file(self.pidfile,'r') 93 | pid = int(pf.read().strip()) 94 | pf.close() 95 | except IOError: 96 | pid = None 97 | 98 | if not pid: 99 | message = "pidfile %s does not exist. Daemon not running?\n" 100 | sys.stderr.write(message % self.pidfile) 101 | return # not an error in a restart 102 | 103 | # Try killing the daemon process 104 | try: 105 | while 1: 106 | os.kill(pid, SIGTERM) 107 | time.sleep(0.1) 108 | except OSError, err: 109 | err = str(err) 110 | if err.find("No such process") > 0: 111 | if os.path.exists(self.pidfile): 112 | os.remove(self.pidfile) 113 | else: 114 | print str(err) 115 | sys.exit(1) 116 | 117 | def restart(self): 118 | """ 119 | Restart the daemon 120 | """ 121 | self.stop() 122 | self.start() 123 | 124 | def run(self): 125 | """ 126 | You should override this method when you subclass Daemon. It will be called after the process has been 127 | daemonized by start() or restart(). 
128 | """ 129 | 130 | -------------------------------------------------------------------------------- /My_lib/color/color.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # coding=utf8 3 | """ 4 | # Author: meetbill 5 | # Created Time : 2018-07-13 23:31:15 6 | 7 | # File Name: color.py 8 | # Description: 9 | 10 | """ 11 | """ 12 | 格式:\033[显示方式;前景色;背景色m 13 | 14 | 说明: 15 | 前景色 背景色 颜色 16 | --------------------------------------- 17 | 30 40 黑色 18 | 31 41 红色 19 | 32 42 绿色 20 | 33 43 黃色 21 | 34 44 蓝色 22 | 35 45 紫红色 23 | 36 46 青蓝色 24 | 37 47 白色 25 | 26 | 显示方式 意义 27 | ------------------------- 28 | 0 终端默认设置 29 | 1 高亮显示 30 | 4 使用下划线 31 | 5 闪烁 32 | 7 反白显示 33 | 8 不可见 34 | """ 35 | 36 | 37 | def colored(text, color=None, on_color=None, attrs=None): 38 | fmt_str = '\x1B[;%dm%s\x1B[0m' 39 | if color is not None: 40 | text = fmt_str % (color, text) 41 | 42 | if on_color is not None: 43 | text = fmt_str % (on_color, text) 44 | 45 | if attrs is not None: 46 | for _ in attrs: 47 | text = fmt_str % (color, text) 48 | 49 | return text 50 | 51 | 52 | def print_error(msg): 53 | print colored(msg, color=31) 54 | 55 | 56 | def print_warning(msg): 57 | print colored(msg, color=33) 58 | 59 | 60 | def print_info(msg): 61 | print colored(msg, color=32) 62 | 63 | 64 | def print_log(msg): 65 | print colored(msg, color=35) 66 | 67 | 68 | def print_debug(msg): 69 | print colored(msg, color=36) 70 | 71 | 72 | def color_print(msg, level='info'): 73 | color_print_dict = { 74 | "error": print_error, 75 | "info": print_info, 76 | "log": print_log, 77 | "warning": print_warning, 78 | "debug": print_debug 79 | # add more 80 | } 81 | if level in color_print_dict: 82 | color_print_dict[level](msg) 83 | else: 84 | print msg 85 | 86 | 87 | if __name__ == '__main__': 88 | # test 89 | print_error("this is an error message!") 90 | print_warning("this is a warning message!") 91 | print_info("this ia a info message!") 92 | print_log('this is a log 
message!') 93 | print_debug('this is a debug message!') 94 | color_print('test info message!', 'info') 95 | -------------------------------------------------------------------------------- /My_lib/command_utils/README.md: -------------------------------------------------------------------------------- 1 | ## command_utils 2 | 3 | 4 | 5 | * [1 command_class](#1-command_class) 6 | * [2 command](#2-command) 7 | * [2.1 使用](#21-使用) 8 | * [2.2 内部原理](#22-内部原理) 9 | * [2.2.1 根据 globals 内置函数获取全部全局变量](#221-根据-globals-内置函数获取全部全局变量) 10 | * [2.2.2 获取函数的参数](#222-获取函数的参数) 11 | 12 | 13 | 14 | ## 1 command_class 15 | 16 | 作用:方便命令行调用模块中类中的方法(推荐), 更新程序时仅需添加方法,非常方便 17 | 18 | ``` 19 | [root@meetbill ~]# python command_class.py 20 | usage: python command_class.py function param [options] 21 | 22 | optional arguments: 23 | -h, --help show this help message and exit 24 | None 25 | Python Library Documentation: class ceshi_class in module __main__ 26 | class ceshi_class 27 | | ceshi 28 | | 29 | | Methods defined here: 30 | | 31 | | output(self, str_info) 32 | | eg:python command_class.py output "hello world" 33 | | 34 | | output2(self, str_info='happy') 35 | | eg:python command_class.py output2 "hello world" 36 | 37 | ``` 38 | 39 | 直接输入 `python command_class.py output "hello world"`即可调用此模块 `ceshi_class` 类中的 output 方法 40 | 41 | ## 2 command 42 | 43 | 作用:方便调用模块中的方法 44 | 45 | ``` 46 | #python command.py 47 | Usage: 48 | command.py hello 'str_info' 49 | ``` 50 | ### 2.1 使用 51 | 52 | 不太清楚参数的含义时,可以直接执行方法,而不带参数 53 | ``` 54 | #python command.py hello 55 | Usage: 56 | python hello 'str_info' 57 | str_info: string 58 | hello() takes exactly 1 argument (0 given) 59 | Traceback (most recent call last): 60 | File "command.py", line 24, in 61 | r = func(*args) 62 | TypeError: hello() takes exactly 1 argument (0 given) 63 | ``` 64 | 65 | 此时会输出方法使用说明(参数说明在函数名下填写)和对应错误信息 66 | 67 | ### 2.2 内部原理 68 | 69 | #### 2.2.1 根据 globals 内置函数获取全部全局变量 70 | 71 | globals() 函数会以字典类型返回当前位置的全部全局变量。 72 | ``` 73 | { 74 | 
'Ceshi_class': < class __main__.Ceshi_class at 0x103b4eae0 > , 75 | 'root_path': '/private/tmp', 76 | '__builtins__': < module '__builtin__' (built - in ) > , 77 | '__file__': 'command.py', 78 | 'inspect': < module 'inspect' from 'xxx/python2.7/inspect.pyc' > , 79 | '__doc__': None, 80 | 'sys': < module 'sys' (built - in ) > , 81 | '_usage': < function _usage at 0x103c02aa0 > , 82 | 'time': < module 'time' from 'xxx/python2.7/lib-dynload/time.so' > , 83 | '__name__': '__main__', 84 | '__package__': None, 85 | 'os': < module 'os' from 'xxx/python2.7/os.pyc' > , 86 | 'hello': < function hello at 0x103b70668 > 87 | } 88 | ``` 89 | 获取可用的函数和类 90 | > * globals() 中判断是否为函数:inspect.isfunction(v) 91 | > * globals() 中判断是否为类:inspect.isclass(v) 92 | > * 类中根据 v.__dict__ 判断是否有 staticmethod 或者 classmethod 93 | 94 | v.__dict__ 内容如下 95 | ``` 96 | { 97 | '__module__': '__main__', 98 | 'ceshi_func2': < staticmethod object at 0x1039d1590 > , 99 | '__doc__': None, 100 | 'ceshi_func1': < classmethod object at 0x1039d1558 > 101 | } 102 | ``` 103 | 备注:类中的 staticmethod 和 classmethod ,使用 inspect.isfunction() 判断时为 False, 所以使用 str 进行匹配判断 104 | 105 | #### 2.2.2 获取函数的参数 106 | 107 | inspect.getargspec(v) 108 | -------------------------------------------------------------------------------- /My_lib/command_utils/command.py: -------------------------------------------------------------------------------- 1 | def hello(str_info, test="world"): 2 | """ 3 | Args: 4 | str_info: string 5 | test: string,defaults:'world' 6 | """ 7 | print "hello %s -- %s" % (test, str_info) 8 | 9 | 10 | class CheckCeshiClass(): 11 | @classmethod 12 | def ceshi_func1(cls, str_info, test="world"): 13 | """ test classmethod 14 | Args: 15 | str_info: string 16 | test: string,defaults:'world' 17 | """ 18 | print "hello %s -- %s" % (test, str_info) 19 | 20 | @staticmethod 21 | def ceshi_func2(str_info): 22 | """ test staticmethod 23 | Args: 24 | str_info: string 25 | """ 26 | print "hello", str_info 27 | 28 | 29 | if __name__ 
== '__main__': 30 | import sys 31 | import inspect 32 | import time 33 | import os 34 | root_path = os.path.split(os.path.realpath(__file__))[0] 35 | os.chdir(root_path) 36 | 37 | def _usage(class_name=None): 38 | print "Usage:" 39 | prefix = "Check" 40 | if class_name is None: 41 | for k, v in sorted(globals().items(), key=lambda item: item[0]): 42 | if inspect.isfunction(v) and k[0] != "_": 43 | args, __, __, defaults = inspect.getargspec(v) 44 | if defaults: 45 | print sys.argv[0], k, str(args[:-len(defaults)])[1:-1].replace(",", ""), \ 46 | str(["%s=%s" % (a, b) for a, b in zip( 47 | args[-len(defaults):], defaults)])[1:-1].replace(",", "") 48 | else: 49 | print sys.argv[0], k, str(v.func_code.co_varnames[:v.func_code.co_argcount])[ 50 | 1:-1].replace(",", "") 51 | if inspect.isclass(v) and k.startswith(prefix): 52 | print sys.argv[0], k 53 | sys.exit(-1) 54 | if class_name not in globals(): 55 | print "not found class_name[%s]" % class_name 56 | sys.exit(-1) 57 | for class_k, class_v in sorted( 58 | globals()[class_name].__dict__.items(), key=lambda item: item[0]): 59 | if str(class_v)[0] == "_": 60 | continue 61 | if str(class_v).startswith(" 'str_info1' 'str_info2' 69 | # 70 | if defaults: 71 | args_all = str(args[:-len(defaults)])[1:-1].replace(",", ""), \ 72 | str(["%s=%s" % (a, b) for a, b in zip(args[-len(defaults):], defaults)])[1:-1].replace(",", "") 73 | else: 74 | args_all = str(v.func_code.co_varnames[:v.func_code.co_argcount])[1:-1].replace(",", "") 75 | 76 | if not isinstance(args_all, tuple): 77 | args_all = tuple(args_all.split(" ")) 78 | 79 | exe_info = "{file_name} {func_name} {args_all}".format( 80 | file_name=sys.argv[0], 81 | func_name=k, 82 | args_all=" ".join(args_all)) 83 | print(exe_info) 84 | 85 | # output func_doc 86 | if func_name and v.func_doc: 87 | print("\n".join(["\t" + line.strip() for line in v.func_doc.strip().split("\n")])) 88 | 89 | print("-------------------------------------------------") 90 | 91 | if len(sys.argv) < 2: 
92 | _usage() 93 | sys.exit(-1) 94 | else: 95 | func = eval(sys.argv[1]) 96 | args = sys.argv[2:] 97 | try: 98 | r = func(*args) 99 | except Exception: 100 | _usage(func_name=sys.argv[1]) 101 | 102 | r = -1 103 | import traceback 104 | traceback.print_exc() 105 | 106 | if isinstance(r, int): 107 | sys.exit(r) 108 | -------------------------------------------------------------------------------- /My_lib/daemon/README.md: -------------------------------------------------------------------------------- 1 | ## 使用 2 | 3 | 4 | 5 | * [启动后如何查看进程](#启动后如何查看进程) 6 | * [原理](#原理) 7 | * [关于两次 fork](#关于两次-fork) 8 | 9 | 10 | 11 | 需要修改部分 12 | 13 | * pid 文件位置 14 | * 运行程序 15 | 16 | ``` 17 | class MyDaemon(Daemon): 18 | def run(self): 19 | ##########################################需要修改部分 20 | ceshi.ceshi() 21 | ########################################## 22 | ``` 23 | ## 启动后如何查看进程 24 | 25 | * pid 文件是否存在 26 | * ps -ef | grep python 27 | * python ./run_daemon.py agent status 28 | 29 | ## 原理 30 | ``` 31 | linux 创建守护进程的步骤如下: 32 | 33 | 1 创建子进程,父进程退出 34 | 35 | 这是创建守护进程的第一步。由于守护进程是脱离控制终端的,因此,完成第一步后就会在 Shell 36 | 终端里造成一程序已经运行完毕的假象。之后的所有工作都在子进程中完成,而用户在 Shell 终端里则 37 | 可以执行其他命令,从而在形式上做到了与控制终端的脱离。在 Linux 中父进程先于子进程退出会造成 38 | 子进程成为孤儿进程,而每当系统发现一个孤儿进程是,就会自动由 1 号进程(init)收养它,这样, 39 | 原先的子进程就会变成 init 进程的子进程。 40 | 41 | 2 在子进程中创建新会话 42 | 43 | Linux 是一个多用户多任务系统,每个进程都有一个进程 ID,同时每个进程还都属于某一个进程组, 44 | 而每个进程组都有一个组长进程,组长进程的标识 ID 等于进程组的 ID,且该进程组 ID 不会因组长进程的 45 | 退出而受到影响。会话期是一个或多个进程组的集合,通常,一个会话开始与用户登录,终止于用户退 46 | 出,在此期间该用户运行的所有进程都属于这个会话期。一个会话期可以有一个单独的控制终端,只有 47 | 其前台进程才可以拥有控制终端,实现与用户的交互。从 shell 中启动的每一个进程将继承一个与之相结 48 | 合的终端,以便进程与用户交互,但是守护进程不需要这些,子进程继承父进程的会话期和进程组 ID, 49 | 子进程会受到发送给该会话期的信号的影响,所以守护进程应该创建一个新的会话期,这个步骤是创建 50 | 守护进程中最重要的一步,虽然它的实现非常简单,但它的意义却非常重大。在这里使用的是系统函数 51 | setsid 来实现的。 52 | 53 | setsid 函数用于创建一个新的会话,并担任该会话组的组长。调用 setsid 有下面的 3 个作用: 54 | 让进程摆脱原会话的控制 55 | 让进程摆脱原进程组的控制 56 | 让进程摆脱原控制终端的控制 57 | 58 | 由于创建守护进程的第一步调用了 fork 函数来创建子进程,再将父进程退出。在调用 fork 函数时, 59 | 子进程全盘拷贝了父进程的会话期、进程组、控制终端等,虽然父进程退出了,但会话期、进程组、控 60 | 
制终端等并没有改变,因此,还还不是真正意义上的独立开来,而 setsid 函数能够使进程完全独立出来 61 | ,从而摆脱其他进程的控制。 62 | 63 | 3 改变当前目录为根目录 64 | 65 | 使用 fork 创建的子进程继承了父进程的当前工作目录。守护进程不应当使用父进程的工作目录,应 66 | 该设置自己的工作目录,通常可以通过 chdir() 来完成,一般可以将其设置为根目录,不过有些守护进程 67 | 需要将它设置到自己特定的工作目录,但此时必须保证所设置的工作目录处于一个不能卸载的文件系统 68 | 中,因为守护进程通常在系统引导后是一直存在的。 69 | 70 | 4 重设文件权限掩码 71 | 72 | 守护进程从父进程继承来的文件创建方式掩码可能会拒绝设置某些许可权限,文件权限掩码是指屏 73 | 蔽掉文件权限中的对应位。比如,有个文件权限掩码是 050,它就屏蔽了文件组拥有者的可读与可执行权 74 | 限。由于使用 fork 函数新建的子进程继承了父进程的文件权限掩码,可能使守护进程的执行出现问题, 75 | 因此,把文件权限掩码设置为 0,可以大大增强该守护进程的灵活性。设置文件权限掩码的函数是 umask。 76 | 在这里,通常的使用方法为 umask(0)。 77 | 78 | 5 关闭文件描述符 79 | 80 | 一般情况下,进程启动时都会自动打开终端文件,但是守护进程已经与终端脱离,所以终端描述符应 81 | 该关闭。用 fork 函数新建的子进程也会从父进程那里继承一些已经打开了的文件。这些被打开的文件可能 82 | 永远不会被守护进程读写,但它们一样消耗系统资源,而且可能导致所在的文件系统无法卸下。 83 | 84 | 6 忽略 SIGCHLD 信号 85 | 86 | 这一步只对需要创建子进程的守护进程才有必要,很多服务器守护进程设计成通过派生子进程来处理 87 | 客户端的请求,如果父进程不对 SIGCHLD 信号进行处理的话,子进程在终止后变成僵尸进程,通过将信号 88 | SIGCHLD 的处理方式设置为 SIG_IGN 可以避免这种情况发生。 89 | 90 | 7 用日志系统记录出错信息 91 | 92 | 因为守护进程没有控制终端,当进程出现错误时无法写入到标准输出上,可以通过调用 syslog 将出 93 | 错信息写入到指定的文件中。 94 | 95 | 8 守护进程退出处理 96 | 97 | 当用户需要外部停止守护进程运行时,往往会使用 kill 命令停止该守护进程。所以,守护进程中需要 98 | 编码来实现 kill 发出的 signal 信号处理,达到进程的正常退出。 99 | ``` 100 | 101 | ## 关于两次 fork 102 | 103 | 第二个 fork 不是必须的,只是为了防止进程打开控制终端。 104 | 105 | 打开一个控制终端的条件是该进程必须是 session leader。第一次 fork,setsid 之后,子进程成为 session leader,进程可以打开终端;第二次 fork 产生的进程,不再是 session leader,进程则无法打开终端。 106 | 107 | 也就是说,只要程序实现得好,控制程序不主动打开终端,无第二次 fork 亦可。 108 | -------------------------------------------------------------------------------- /My_lib/daemon/ceshi.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding=utf8 3 | """ 4 | # Author: meetbill 5 | # Created Time : 2017-09-01 15:42:47 6 | 7 | # File Name: ceshi.py 8 | # Description: 9 | 10 | """ 11 | import time 12 | 13 | def ceshi(): 14 | while True: 15 | print "helloworld" 16 | print "Start : %s" % time.ctime() 17 | time.sleep(1) 18 | print "End : %s" % time.ctime() 19 | if __name__ == "__main__": 20 | ceshi() 21 
| -------------------------------------------------------------------------------- /My_lib/daemon/run_daemon.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding=utf8 3 | import os 4 | import sys 5 | from xlib import daemon 6 | 7 | import ceshi 8 | class MyDaemon(daemon.Daemon): 9 | def run(self): 10 | ##########################################需要修改部分 11 | ceshi.ceshi() 12 | ########################################## 13 | 14 | if __name__ == "__main__": 15 | ###################################### 16 | # edit this code 17 | cur_dir = os.getcwd() 18 | if not os.path.exists("{cur_dir}/run/".format(cur_dir=cur_dir)): 19 | os.makedirs("./run") 20 | 21 | if not os.path.exists("{cur_dir}/log/".format(cur_dir=cur_dir)): 22 | os.makedirs("./log") 23 | 24 | my_daemon = MyDaemon( 25 | pidfile="{cur_dir}/run/daemon.pid".format(cur_dir=cur_dir), 26 | stdout="{cur_dir}/log/daemon_stdout.log".format(cur_dir=cur_dir), 27 | stderr="{cur_dir}/log/daemon_stderr.log".format(cur_dir=cur_dir) 28 | ) 29 | 30 | if len(sys.argv) == 3: 31 | daemon_name = sys.argv[1] 32 | if 'start' == sys.argv[2]: 33 | my_daemon.start() 34 | elif 'stop' == sys.argv[2]: 35 | my_daemon.stop() 36 | elif 'restart' == sys.argv[2]: 37 | my_daemon.restart() 38 | elif 'status' == sys.argv[2]: 39 | alive = my_daemon.is_running() 40 | if alive: 41 | print('process [%s] is running ......' 
% my_daemon.get_pid()) 42 | else: 43 | print('daemon process [%s] stopped' % daemon_name) 44 | else: 45 | print "Unknown command" 46 | sys.exit(2) 47 | sys.exit(0) 48 | else: 49 | print "usage: %s agent|server start|stop|restart|status" % sys.argv[0] 50 | sys.exit(2) 51 | -------------------------------------------------------------------------------- /My_lib/daemon/xlib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/meetbill/MyPythonLib/fd58b8a112ad366fba67771cafd752f0b8f378ee/My_lib/daemon/xlib/__init__.py -------------------------------------------------------------------------------- /My_lib/daemon/xlib/daemon.py: -------------------------------------------------------------------------------- 1 | # -*- coding:utf-8 -*- 2 | import sys 3 | import os 4 | import time 5 | import atexit 6 | from signal import SIGTERM 7 | 8 | 9 | class Daemon(object): 10 | """ 11 | A generic daemon class. 12 | 13 | Usage: subclass the Daemon class and override the run() method 14 | 15 | 编写守护进程的一般步骤步骤: 16 | (1)创建自己成并被 init 进程接管:在父进程中执行 fork 并 exit 退出; 17 | (2)创建新进程组和新会话:在子进程中调用 setsid 函数创建新的会话; 18 | (3)修改子进程的工作目录:在子进程中调用 chdir 函数,让根目录 "/" 成为子进程的工作目录; 19 | (4)修改子进程 umask:在子进程中调用 umask 函数,设置进程的 umask 为 0; 20 | (5)在子进程中关闭任何不需要的文件描述符 21 | 22 | 在子进程中再次 fork 一个进程,这个进程称为孙子进程,之后子进程退出 23 | 重定向孙子进程的标准输入流、标准输出流、标准错误流到 /dev/null 24 | 那么最终的孙子进程就称为守护进程。 25 | """ 26 | 27 | def __init__(self, 28 | pidfile='./run/daemon.pid', 29 | stdin='/dev/null', 30 | stdout='./log/stdout.log', 31 | stderr='./log/stderr.log', 32 | home_dir="/" 33 | ): 34 | self.stdin = stdin 35 | self.stdout = stdout 36 | self.stderr = stderr 37 | self.pidfile = pidfile 38 | self.home_dir = home_dir 39 | 40 | def daemonize(self): 41 | """ 42 | do the UNIX double-fork magic, see Stevens' "Advanced 43 | Programming in the UNIX Environment" for details (ISBN 0201563177) 44 | http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16 45 | """ 46 | try: 47 | 
pid = os.fork() 48 | if pid > 0: 49 | # exit first parent 50 | sys.exit(0) 51 | except OSError as e: 52 | sys.stderr.write( 53 | "fork #1 failed: %d (%s)\n" % 54 | (e.errno, e.strerror)) 55 | sys.exit(1) 56 | 57 | # decouple from parent environment 58 | # 防止占用别的路径的 working dir 的 fd,导致一些 block 不能 unmount ,默认 home_dir 为根目录 59 | os.chdir(self.home_dir) 60 | # 创建新会话,子进程成为新会话的首进程(session leader) 61 | ''' 62 | setsid()函数可以建立一个对话期。 63 | 64 | 会话期(session)是一个或多个进程组的集合。 65 | 如果,调用 setsid 的进程不是一个进程组的组长,此函数创建一个新的会话期。 66 | (1)此进程变成该对话期的首进程 67 | (2)此进程变成一个新进程组的组长进程。 68 | (3)此进程没有控制终端,如果在调用setsid前,该进程有控制终端,那么与该终端的联系被解除。 如果该进程是一个进程组的组长,此函数返回错误。 69 | (4)为了保证这一点,我们先调用fork()然后exit(),此时只有子进程在运行 70 | ''' 71 | # 创建新的会话,子进程成为会话的首进程 72 | # 控制终端,登录会话和进程组通常是从父进程继承下来的。我们的目的就是要摆脱它们,使之不受它们的影响。方法是在创建子进程的基础上,调用setsid()使进程成为会话组长 73 | os.setsid() 74 | 75 | ''' 76 | 由于umask会屏蔽权限,所以设定为0,这样可以避免读写文件时碰到权限问题。 77 | ''' 78 | os.umask(0) 79 | 80 | ''' 81 | 现在,进程已经成为无终端的会话组长。但它可以重新申请打开一个控制终端。可以通过使进程不再成为会话组长来禁止进程重新打开控制终端: 82 | 83 | 关于两次fork 84 | 第二个fork 不是必须的,只是为了防止进程打开控制终端。 85 | 打开一个控制终端的条件是该进程必须是 session leader。第一次 fork,setsid 之后,子进程成为 session leader,进程可以打开终端;第二次 fork 产生的进程,不再是 session leader,进程则无法打开终端。 86 | 也就是说,只要程序实现得好,控制程序不主动打开终端,无第二次fork亦可。 87 | ''' 88 | # do second fork 89 | try: 90 | # 创建孙子进程,而后子进程退出 91 | # 新创建的孙子进程,不是会话组长 92 | pid = os.fork() 93 | if pid > 0: 94 | # exit from second parent 95 | sys.exit(0) 96 | except OSError as e: 97 | sys.stderr.write( 98 | "fork #2 failed: %d (%s)\n" % 99 | (e.errno, e.strerror)) 100 | sys.exit(1) 101 | 102 | # redirect standard file descriptors 103 | sys.stdout.flush() 104 | sys.stderr.flush() 105 | si = file(self.stdin, 'r') 106 | so = file(self.stdout, 'a+') 107 | se = file(self.stderr, 'a+', 0) 108 | # os.dup2(fd, fd2); 后将 fd 代表的那个文件(可以想象成是P_fd指针)强行复制给 fd2 109 | # 也就是重定向,将标准输入,标准输出,标准错误重定向到指定文件中 110 | os.dup2(si.fileno(), sys.stdin.fileno()) 111 | os.dup2(so.fileno(), sys.stdout.fileno()) 112 | os.dup2(se.fileno(), sys.stderr.fileno()) 113 | 114 | # write 
pidfile 115 | atexit.register(self.delpid) 116 | pid = str(os.getpid()) 117 | file(self.pidfile, 'w+').write("%s\n" % pid) 118 | 119 | def delpid(self): 120 | os.remove(self.pidfile) 121 | 122 | def start(self): 123 | """ 124 | Start the daemon 125 | """ 126 | # Check for a pidfile to see if the daemon already runs 127 | try: 128 | pf = file(self.pidfile, 'r') 129 | pid = int(pf.read().strip()) 130 | pf.close() 131 | except IOError: 132 | pid = None 133 | 134 | if pid: 135 | message = "pidfile %s already exist. Daemon already running?\n" 136 | sys.stderr.write(message % self.pidfile) 137 | sys.exit(1) 138 | 139 | # Start the daemon 140 | self.daemonize() 141 | self.run() 142 | 143 | def stop(self): 144 | """ 145 | Stop the daemon 146 | """ 147 | # Get the pid from the pidfile 148 | try: 149 | pf = file(self.pidfile, 'r') 150 | pid = int(pf.read().strip()) 151 | pf.close() 152 | except IOError: 153 | pid = None 154 | 155 | if not pid: 156 | message = "pidfile %s does not exist. Daemon not running?\n" 157 | sys.stderr.write(message % self.pidfile) 158 | return # not an error in a restart 159 | 160 | # Try killing the daemon process 161 | try: 162 | while True: 163 | os.kill(pid, SIGTERM) 164 | time.sleep(0.1) 165 | except OSError as err: 166 | err = str(err) 167 | if err.find("No such process") > 0: 168 | if os.path.exists(self.pidfile): 169 | os.remove(self.pidfile) 170 | else: 171 | print str(err) 172 | sys.exit(1) 173 | 174 | def restart(self): 175 | """ 176 | Restart the daemon 177 | """ 178 | self.stop() 179 | self.start() 180 | 181 | def run(self): 182 | """ 183 | You should override this method when you subclass Daemon. It will be called after the process has been 184 | daemonized by start() or restart(). 
185 | """ 186 | 187 | def get_pid(self): 188 | try: 189 | pf = open(self.pidfile, 'r') 190 | pid = int(pf.read().strip()) 191 | pf.close() 192 | except IOError: 193 | pid = None 194 | except SystemExit: 195 | pid = None 196 | return pid 197 | 198 | def is_running(self): 199 | pid = self.get_pid() 200 | # print(pid) 201 | return pid and os.path.exists('/proc/%d' % pid) 202 | 203 | 204 | if __name__ == "__main__": 205 | def helloworld(): 206 | import time 207 | # stdout 文件默认开启的缓冲写,所以需要隔一段时间才能看到日志中有文件写入 208 | # open 函数中有一个 bufferin 的参数,默认是 -1,如果设置为 0 时,就是无缓冲模式 209 | # open("./log/test.txt",'a+',buffering=0) 210 | while True: 211 | print "helloworld" 212 | print "Start : %s" % time.ctime() 213 | time.sleep(1) 214 | print "End : %s" % time.ctime() 215 | 216 | class MyDaemon(Daemon): 217 | def run(self): 218 | helloworld() 219 | ###################################### 220 | # edit this code 221 | cur_dir = os.getcwd() 222 | if not os.path.exists("{cur_dir}/run/".format(cur_dir=cur_dir)): 223 | os.makedirs("./run") 224 | 225 | if not os.path.exists("{cur_dir}/log/".format(cur_dir=cur_dir)): 226 | os.makedirs("./log") 227 | 228 | my_daemon = MyDaemon( 229 | pidfile="{cur_dir}/run/daemon.pid".format(cur_dir=cur_dir), 230 | stdout="{cur_dir}/log/daemon_stdout.log".format(cur_dir=cur_dir), 231 | stderr="{cur_dir}/log/daemon_stderr.log".format(cur_dir=cur_dir) 232 | ) 233 | 234 | if len(sys.argv) == 3: 235 | daemon_name = sys.argv[1] 236 | if 'start' == sys.argv[2]: 237 | my_daemon.start() 238 | elif 'stop' == sys.argv[2]: 239 | my_daemon.stop() 240 | elif 'restart' == sys.argv[2]: 241 | my_daemon.restart() 242 | elif 'status' == sys.argv[2]: 243 | alive = my_daemon.is_running() 244 | if alive: 245 | print('process [%s] is running ......' 
% my_daemon.get_pid()) 246 | else: 247 | print('daemon process [%s] stopped' % daemon_name) 248 | else: 249 | print "Unknown command" 250 | sys.exit(2) 251 | sys.exit(0) 252 | else: 253 | print "usage: %s agent|server start|stop|restart|status" % sys.argv[0] 254 | sys.exit(2) 255 | -------------------------------------------------------------------------------- /My_lib/easyrun/README.md: -------------------------------------------------------------------------------- 1 | ## easyrun 2 | 3 | 一个 subprocess 模块的封装,可以更方便的进行系统调用 4 | 5 | 6 | 7 | * [1 安装](#1-安装) 8 | * [2 使用方法](#2-使用方法) 9 | * [2.1 run](#21-run) 10 | * [2.2 run_capture](#22-run_capture) 11 | * [communicate() 和 wait() 使用上的区别](#communicate-和-wait-使用上的区别) 12 | * [2.3 run_capture_limited](#23-run_capture_limited) 13 | * [2.4 run_timeout](#24-run_timeout) 14 | * [3 返回结果进行处理](#3-返回结果进行处理) 15 | * [3.1 返回结果处理为数组](#31-返回结果处理为数组) 16 | * [3.2 返回数据去掉换行符](#32-返回数据去掉换行符) 17 | * [4 更新说明](#4-更新说明) 18 | 19 | 20 | 21 | ## 1 安装 22 | 23 | ``` 24 | 将此目录下文件 easyrun.py 放到自己的程序目录中即可 25 | ``` 26 | 27 | ## 2 使用方法 28 | 29 | ### 2.1 run 30 | 31 | 只单纯的执行,然后返回 linux run code 和执行状态 32 | 33 | ``` 34 | >>> import easyrun 35 | >>> r = easyrun.run('uptime') 36 | 04:06:37 up 2 min, 1 user, load average: 0.20, 0.19, 0.08 37 | >>> r.output 38 | >>> r.success 39 | True 40 | >>> r.retcode 41 | 0 42 | ``` 43 | ### 2.2 run_capture 44 | 捕捉所有的执行结果 45 | ``` 46 | >>> r = easyrun.run_capture('uptime') 47 | >>> r.output 48 | ' 04:07:16 up 2 min, 1 user, load average: 0.11, 0.17, 0.08\n' 49 | >>> r.success 50 | True 51 | >>> r.retcode 52 | 0 53 | ``` 54 | 例子 55 | ``` 56 | from easyrun import run_capture 57 | 58 | r = run_capture('ls -la') 59 | if r.success: 60 | print(r.output) 61 | else: 62 | print("Error: '%s' exit code %s" % (r.command, r.retcode)) 63 | print(" ...") 64 | # print last three lines of output 65 | for line in r.output.splitlines()[-3:]: 66 | print(" %s" % line) 67 | ``` 68 | #### communicate() 和 wait() 使用上的区别 69 | 70 | subprocess 
就是开启一个子进程,自己去执行命令,这个子进程的状态肯定得收集,这时候就需要调用 wait 或者 communicate 了,手册上面也注明了这两个方法的特点:在数据超过 PIPE 的缓存时,wait 会阻塞进程;communicate 会把所有的数据都读取到内存中。 71 | wait: 72 | ``` 73 | Warning 74 | This will deadlock if the child process generates enough output to a stdout or stderr pipe such that it blocks waiting for the OS pipe buffer to accept more data. Use communicate() to avoid that. 75 | ``` 76 | communicate: 77 | ``` 78 | Note 79 | The data read is buffered in memory, so do not use this method if the data size is large or unlimited 80 | ``` 81 | 如果想要一行一行的读取的话,可以通过以下方法解决: 82 | 83 | 数据一行一行读取,读取完之后 wait,这样既保证了不会阻塞(PIPE 中数据有进有出,最后空了才 wait),又保证了不会占用大量主机内存(在内存中的数据只有一行 line)。 84 | ``` 85 | p1=subprocess.Popen(comm1,shell=True,stdout=subprocess.PIPE) 86 | for line in p1.stdout: 87 | pass 88 | p1.wait() 89 | ``` 90 | 91 | ### 2.3 run_capture_limited 92 | 93 | 把输出的结果精简过,maxlines 是控制行数 94 | ``` 95 | print(run_capture_limited('ls', maxlines=2).output) 96 | ``` 97 | 98 | ### 2.4 run_timeout 99 | 100 | 设置程序运行时长 101 | 102 | 如下是获取 ES 状态示例,访问的服务器无响应时,一直无法返回结果,通过设置 timeout 以免程序阻塞 103 | 104 | ``` 105 | >>>import easyrun 106 | >>>r = easyrun.run_timeout('curl -sXGET http://IP:9200/_cluster/health/?pretty', timeout=3) 107 | >>>r.success 108 | False 109 | >>>r.retcode 110 | 124 111 | >>>r.output 112 | 'timeout' 113 | ``` 114 | 115 | ## 3 返回结果进行处理 116 | 117 | r.output 为字符串,日常使用时需要处理为对应类型的数据 118 | 119 | ### 3.1 返回结果处理为数组 120 | 使用此程序获取多行内容时处理方法,通过 split 方法将输出内容变为数组 121 | 122 | ``` 123 | r_list = r.output.split('\n') 124 | while "" in r_list: 125 | r_list.remove("") 126 | ``` 127 | ### 3.2 返回数据去掉换行符 128 | 129 | 比如 run_capture 获取的数据为一行字符串或者某个数字,则需要将其进行转换 130 | ``` 131 | r.output.replace("\n","") 132 | ``` 133 | 134 | ## 4 更新说明 135 | 136 | 137 | 本程序在[原程序 easyrun](https://github.com/rfyiamcool/easyrun) 的基础上,修正了部分问题 138 | 139 | > * v1.0.3 去掉 run_stream 和 run_async 函数 140 | > * v1.0.2 修正 run_timeout 无法生效问题 141 | 142 | -------------------------------------------------------------------------------- 
/My_lib/easyrun/easyrun.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # coding=utf8 3 | """ 4 | # Author: meetbill 5 | # Created Time : 2017-10-19 10:42:21 6 | 7 | # File Name: easyrun.py 8 | # Description: 9 | 10 | # v1.0.4 11 | 12 | """ 13 | import subprocess 14 | import time 15 | import os 16 | import signal 17 | 18 | 19 | class Result(object): 20 | def __init__(self, command=None, retcode=None, output=None): 21 | self.command = command or '' 22 | self.retcode = retcode 23 | self.output = output 24 | self.success = False 25 | if retcode == 0: 26 | self.success = True 27 | 28 | 29 | def run(command): 30 | process = subprocess.Popen(command, shell=True) 31 | process.communicate() 32 | return Result(command=command, retcode=process.returncode) 33 | 34 | 35 | def run_timeout(command, timeout=10): 36 | timeout = int(timeout) 37 | process = subprocess.Popen( 38 | command, 39 | stderr=subprocess.STDOUT, 40 | stdout=subprocess.PIPE, 41 | shell=True) 42 | t_beginning = time.time() 43 | seconds_passed = 0 44 | while True: 45 | if process.poll() is not None: 46 | break 47 | seconds_passed = time.time() - t_beginning 48 | if timeout and seconds_passed > timeout: 49 | process.terminate() 50 | return Result(command=command, retcode=124, output="timeout") 51 | time.sleep(0.1) 52 | output, _ = process.communicate() 53 | output = output.strip('\n') 54 | return Result(command=command, retcode=process.returncode, output=output) 55 | 56 | 57 | def run_capture(command): 58 | outpipe = subprocess.PIPE 59 | errpipe = subprocess.STDOUT 60 | process = subprocess.Popen(command, shell=True, stdout=outpipe, 61 | stderr=errpipe) 62 | output, _ = process.communicate() 63 | output = output.strip('\n') 64 | return Result(command=command, retcode=process.returncode, output=output) 65 | 66 | 67 | def run_capture_limited(command, maxlines=20000): 68 | 69 | import collections 70 | import threading 71 | 72 | lines = 
collections.deque(maxlen=maxlines) 73 | 74 | def reader_thread(stream, lock): 75 | for line in stream: 76 | lines.append(line) 77 | outpipe = subprocess.PIPE 78 | errpipe = subprocess.STDOUT 79 | process = subprocess.Popen(command, shell=True, stdout=outpipe, 80 | stderr=errpipe) 81 | lock = threading.Lock() 82 | thread = threading.Thread( 83 | target=reader_thread, args=( 84 | process.stdout, lock)) 85 | thread.start() 86 | 87 | process.wait() 88 | thread.join() 89 | 90 | return Result(command=command, retcode=process.returncode, 91 | output=''.join(lines)) 92 | 93 | 94 | def run_killpid(pid): 95 | os.kill(pid, signal.SIGTERM) 96 | 97 | 98 | if __name__ == '__main__': 99 | print('---[ .success ]---') 100 | print(run('ls').success) 101 | print(run('dir').success) 102 | 103 | print('---[ .retcode ]---') 104 | print(run('ls').retcode) 105 | print(run('dir').retcode) 106 | 107 | print('---[ capture ]---') 108 | print(len(run_capture('ls').output)) 109 | print('---[ capture ]ls---') 110 | print(run_capture('ls').output) 111 | 112 | print('---[ limited capture ]---') 113 | print(run_capture_limited('ls', maxlines=2).output) 114 | 115 | print('---[ timeout ]---') 116 | print(run_timeout('curl -s www.baidu.com', timeout=3).output) 117 | print('---[ timeout ]ls---') 118 | print(run_timeout('ls', timeout=3).output) 119 | print('---[ timeout & capture ]ls---') 120 | print(run_capture('ls').output == run_timeout('ls', timeout=3).output) 121 | -------------------------------------------------------------------------------- /My_lib/file_utils/README.md: -------------------------------------------------------------------------------- 1 | ## 修改配置文件工具 2 | 3 | 4 | 5 | * [功能](#功能) 6 | * [使用](#使用) 7 | * [获取配置](#获取配置) 8 | * [更改配置](#更改配置) 9 | * [二次开发](#二次开发) 10 | 11 | 12 | 13 | ## 功能 14 | 15 | * 查找配置文件中配置和修改配置文件工具 16 | * 更改配置只对特定项进行修改,如果没有则进行追加 17 | * 更改配置项后的形式为 key=value 形式,如果要进行修改请看二次开发 18 | 19 | ## 使用 20 | 21 | 直接 `python file_util.py` 会输出使用提示 22 | 23 | ### 获取配置 24 | 25 | 
使用方法:python file_util.py cfg_get 参数 26 | 27 | 如: 28 | ``` 29 | python file_util.py cfg_get ./config s3_addr 30 | ``` 31 | 参数列表 32 | 33 | > * config_file: 配置文件位置 34 | > * item: 获取项 35 | > * detail: 详细显示,显示 item 在多少行以及是否为注释状态等等 36 | 37 | ### 更改配置 38 | 39 | 功能:对某配置进行修改 40 | 41 | 使用方法:python file_util.py cfg_set 参数 42 | 43 | 如: 44 | ``` 45 | python file_util.py cfg_set ./config s3_addr 192.168.1.3 46 | ``` 47 | 参数列表 48 | 49 | > * config_file: 配置文件位置 50 | > * item: 获取项 51 | > * value: 某项要更改的值 52 | > * commented: 配置的时候是否配置为注释状态 53 | 54 | ## 二次开发 55 | 56 | 更改配置项后的形式为 key=value 形式,如果要进行修改,比如修改为 `key="value"` 时可以修改程序 57 | 58 | 程序中`###############` 注释下的部分即为需要修改的地方 59 | -------------------------------------------------------------------------------- /My_lib/file_utils/config: -------------------------------------------------------------------------------- 1 | s3_addr=192.168.1.5 2 | s3_accesskey="" 3 | s3_secretkey="" 4 | s3_region="" 5 | #s3_ceshi="" 6 | -------------------------------------------------------------------------------- /My_lib/file_utils/file_util.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # coding=utf8 3 | """ 4 | # Author: meetbill 5 | # Created Time : 2016-09-05 19:57:09 6 | 7 | # File Name: file_util.py 8 | # Description: 9 | # Package for operations. 10 | """ 11 | 12 | import os 13 | import sys 14 | import re 15 | 16 | reload(sys) 17 | sys.setdefaultencoding('utf8') 18 | 19 | # 需要修改,配置文件位置 20 | 21 | 22 | def _loadconfig(cfgfile=None, detail=False): 23 | """Read config file and parse config item to dict. 
24 | """ 25 | settings = {} 26 | with open(cfgfile) as f: 27 | for line_i, line in enumerate(f): 28 | # line_i[行号],line[每行内容] 29 | 30 | # 删除空白符(包括'\n', '\r', '\t', ' ') 31 | line = line.strip() 32 | 33 | # 跳过空行和注释('# '开头的) 34 | if not line or line.startswith('# '): 35 | continue 36 | 37 | # detect if it's commented 38 | if line.startswith('#'): 39 | line = line.strip('#') 40 | commented = True 41 | if not detail: 42 | continue 43 | else: 44 | commented = False 45 | # 将行以第一个'='分割 46 | ######################################### 47 | fs = re.split('=', line, 1) 48 | if len(fs) != 2: 49 | continue 50 | 51 | item = fs[0].strip() 52 | value = fs[1].strip() 53 | 54 | if item in settings: 55 | if detail: 56 | count = settings[item]['count'] + 1 57 | if not commented: 58 | settings[item] = detail and { 59 | 'file': cfgfile, 60 | 'line': line_i, 61 | 'value': value, 62 | 'commented': commented, 63 | } or value 64 | else: 65 | count = 1 66 | settings[item] = detail and { 67 | 'file': cfgfile, 68 | 'line': line_i, 69 | 'value': fs[1].strip(), 70 | 'commented': commented, 71 | } or value 72 | if detail: 73 | settings[item]['count'] = count 74 | 75 | return settings 76 | 77 | 78 | def cfg_get(config_file, item, detail=False): 79 | """ 80 | 功能:获取配置文件中某个 item 的值 81 | 82 | config_file:配置文件位置 83 | item:获取项 84 | detail:详细显示,显示 item 在多少行以及是否为注释状态等等 85 | 86 | 例子:python file_util.py cfg_get ./config s3_addr 87 | """ 88 | if not os.path.exists(config_file): 89 | return None 90 | config = _loadconfig(config_file, detail=detail) 91 | if item in config: 92 | return config[item] 93 | else: 94 | return None 95 | 96 | 97 | def cfg_set(config_file, item, value, commented=False): 98 | """ 99 | 功能:对某配置进行修改,如果可以获取到 key,则对 key 后的 item 进行修改如果获取不到 key,则直接在配置文件后进行追加一行 100 | 101 | config_file:配置文件位置 102 | item:获取项 103 | value:某项要更改的值 104 | commented:配置的时候是否配置为注释状态 105 | 106 | 例子:python file_util.py cfg_set ./config s3_addr 192.168.1.3 107 | """ 108 | v = cfg_get(config_file, item, detail=True) 109 | 
#print v 110 | 111 | if v: 112 | # detect if value change 113 | if v['commented'] == commented and v['value'] == value: 114 | return True 115 | 116 | # empty value should be commented 117 | # 如果有key,但是传的value值为空,会将此行进行注释 118 | if value == '': 119 | commented = True 120 | 121 | # replace item in line 122 | lines = [] 123 | with open(v['file']) as f: 124 | for line_i, line in enumerate(f): 125 | if line_i == v['line']: 126 | # 对没注释的行进行操作 127 | if not v['commented']: 128 | # 检测是否需要注释 129 | if commented: 130 | if v['count'] > 1: 131 | # delete this line, just ignore it 132 | pass 133 | else: 134 | # comment this line 135 | ######################################### 136 | lines.append('#%s=%s\n' % (item, value)) 137 | else: 138 | ######################################### 139 | lines.append('%s=%s\n' % (item, value)) 140 | else: 141 | if commented: 142 | # do not allow change comment value 143 | lines.append(line) 144 | pass 145 | else: 146 | # append a new line after comment line 147 | lines.append(line) 148 | ######################################### 149 | lines.append('%s=%s\n' % (item, value)) 150 | else: 151 | lines.append(line) 152 | with open(v['file'], 'w') as f: 153 | f.write(''.join(lines)) 154 | else: 155 | # append to the end of file 156 | with open(config_file, 'a') as f: 157 | ######################################### 158 | f.write('\n%s%s = %s\n' % (commented and '#' or '', item, value)) 159 | #cfg_get(config_file,item, detail=True) 160 | return True 161 | 162 | 163 | if __name__ == '__main__': 164 | import sys 165 | import inspect 166 | if len(sys.argv) < 2: 167 | print "Usage:" 168 | for k, v in sorted(globals().items(), key=lambda item: item[0]): 169 | if inspect.isfunction(v) and k[0] != "_": 170 | args, __, __, defaults = inspect.getargspec(v) 171 | if defaults: 172 | print sys.argv[0], k, str(args[:-len(defaults)])[1:-1].replace(",", ""), \ 173 | str(["%s=%s" % (a, b) for a, b in zip( 174 | args[-len(defaults):], defaults)])[1:-1].replace(",", "") 175 
| else: 176 | print sys.argv[0], k, str(v.func_code.co_varnames[:v.func_code.co_argcount])[ 177 | 1:-1].replace(",", "") 178 | sys.exit(-1) 179 | else: 180 | func = eval(sys.argv[1]) 181 | args = sys.argv[2:] 182 | try: 183 | r = func(*args) 184 | print r 185 | except Exception as e: 186 | print "Usage:" 187 | print "\t", "python %s %s" % (sys.argv[0], sys.argv[1]), str( 188 | func.func_code.co_varnames[:func.func_code.co_argcount])[1:-1].replace(",", "") 189 | if func.func_doc: 190 | print "\n".join(["\t\t" + line.strip() 191 | for line in func.func_doc.strip().split("\n")]) 192 | print e 193 | r = -1 194 | import traceback 195 | traceback.print_exc() 196 | if isinstance(r, int): 197 | sys.exit(r) 198 | -------------------------------------------------------------------------------- /My_lib/log_utils/README.md: -------------------------------------------------------------------------------- 1 | ## blog 2 | 3 | 4 | 5 | 6 | * [1 使用方法](#1-使用方法) 7 | * [1.1 全局日志配置](#11-全局日志配置) 8 | * [1.2 针对不同的用途或模块,指定不同的日志](#12-针对不同的用途或模块指定不同的日志) 9 | 10 | 11 | 12 | 日志库 13 | 14 | Function 15 | 16 | >* 可设置是否输出到终端,如果输出到终端,则彩色显示 17 | >* 可设置日志路径及输出日志级别 18 | >* 可设置日志轮转大小及保存个数 19 | 20 | ## 1 使用方法 21 | 22 | ### 1.1 全局日志配置 23 | 24 | ``` 25 | import blog 26 | blog.init_log("./log/common.log") 27 | ``` 28 | > * 配置全局日志配置后,默认 info 级别以上的日志会打印到 "./log/common.log.log" 29 | > * warning 级别以上的日志会打印到 "./log/common.log.log.wf" 中 30 | 31 | ### 1.2 针对不同的用途或模块,指定不同的日志 32 | 33 | ``` 34 | import blog 35 | debug = True 36 | logpath = "./log/test.log" 37 | logger = blog.Log( 38 | logpath, 39 | level="debug", 40 | logid="meetbill", 41 | is_console=debug, 42 | mbs=5, 43 | count=5) 44 | 45 | logstr = "helloworld" 46 | logger.error(logstr) 47 | logger.info(logstr) 48 | logger.warn(logstr) 49 | logger.debug(logstr) 50 | ``` 51 | 每个 logger 都有个名字,以 ‘.’ 来划分继承关系。名字为空的就是 root_logger, 默认所有的日志都会出现在全局的 logging 配置的日志文件中 52 | 53 | 如何让自定义 logger 的内容不出现在全局的 logging 里面,其中起作用的就是如下一行 54 | ``` 55 | self._logger.propagate=False 56 | 
``` 57 | -------------------------------------------------------------------------------- /My_lib/log_utils/blog.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # coding=utf8 3 | """ 4 | # Author: meetbill 5 | # Created Time : 2016-08-01 10:59:26 6 | 7 | # File Name: blog.py 8 | # Description: 9 | 10 | """ 11 | import sys 12 | import logging 13 | from logging.handlers import RotatingFileHandler 14 | import os 15 | 16 | 17 | class ColoredFormatter(logging.Formatter): 18 | '''A colorful formatter.''' 19 | 20 | def __init__(self, fmt=None, datefmt=None): 21 | logging.Formatter.__init__(self, fmt, datefmt) 22 | # Color escape string 23 | COLOR_RED = '\033[1;31m' 24 | COLOR_GREEN = '\033[1;32m' 25 | COLOR_YELLOW = '\033[1;33m' 26 | COLOR_BLUE = '\033[1;34m' 27 | COLOR_PURPLE = '\033[1;35m' 28 | COLOR_CYAN = '\033[1;36m' 29 | COLOR_GRAY = '\033[1;37m' 30 | COLOR_WHITE = '\033[1;38m' 31 | COLOR_RESET = '\033[1;0m' 32 | 33 | # Define log color 34 | self.LOG_COLORS = { 35 | 'DEBUG': '%s', 36 | 'INFO': COLOR_GREEN + '%s' + COLOR_RESET, 37 | 'WARNING': COLOR_YELLOW + '%s' + COLOR_RESET, 38 | 'ERROR': COLOR_RED + '%s' + COLOR_RESET, 39 | 'CRITICAL': COLOR_RED + '%s' + COLOR_RESET, 40 | 'EXCEPTION': COLOR_RED + '%s' + COLOR_RESET, 41 | } 42 | 43 | def format(self, record): 44 | level_name = record.levelname 45 | msg = logging.Formatter.format(self, record) 46 | 47 | return self.LOG_COLORS.get(level_name, '%s') % msg 48 | 49 | 50 | class Log(object): 51 | 52 | ''' 53 | log 54 | ''' 55 | 56 | def __init__(self, filename, level="debug", logid="meetbill", 57 | mbs=20, count=10, is_console=True): 58 | ''' 59 | mbs: how many MB 60 | count: the count of remain 61 | ''' 62 | try: 63 | self._level = level 64 | #print "init,level:",level,"\t","get_map_level:",self._level 65 | self._filename = filename 66 | self._logid = logid 67 | self._logger = logging.getLogger(self._logid) 68 | file_path = os.path.split(self._filename)[0] 
69 | if not os.path.exists(file_path): 70 | os.makedirs(file_path) 71 | 72 | if not len(self._logger.handlers): 73 | self._logger.setLevel(self.get_map_level(self._level)) 74 | 75 | fmt = '[%(asctime)s] %(levelname)s %(message)s' 76 | datefmt = '%Y-%m-%d %H:%M:%S' 77 | formatter = logging.Formatter(fmt, datefmt) 78 | 79 | maxBytes = int(mbs) * 1024 * 1024 80 | file_handler = RotatingFileHandler( 81 | self._filename, mode='a', maxBytes=maxBytes, backupCount=count) 82 | self._logger.setLevel(self.get_map_level(self._level)) 83 | file_handler.setFormatter(formatter) 84 | self._logger.addHandler(file_handler) 85 | 86 | if is_console == True: 87 | stream_handler = logging.StreamHandler(sys.stderr) 88 | console_formatter = ColoredFormatter(fmt, datefmt) 89 | stream_handler.setFormatter(console_formatter) 90 | self._logger.addHandler(stream_handler) 91 | self._logger.propagate = False 92 | 93 | except Exception as expt: 94 | print expt 95 | 96 | def tolog(self, msg, level=None): 97 | try: 98 | level = level if level else self._level 99 | level = str(level).lower() 100 | level = self.get_map_level(level) 101 | if level == logging.DEBUG: 102 | self._logger.debug(msg) 103 | if level == logging.INFO: 104 | self._logger.info(msg) 105 | if level == logging.WARN: 106 | self._logger.warn(msg) 107 | if level == logging.ERROR: 108 | self._logger.error(msg) 109 | if level == logging.CRITICAL: 110 | self._logger.critical(msg) 111 | except Exception as expt: 112 | print expt 113 | 114 | def debug(self, msg): 115 | self.tolog(msg, level="debug") 116 | 117 | def info(self, msg): 118 | self.tolog(msg, level="info") 119 | 120 | def warn(self, msg): 121 | self.tolog(msg, level="warn") 122 | 123 | def error(self, msg): 124 | self.tolog(msg, level="error") 125 | 126 | def critical(self, msg): 127 | self.tolog(msg, level="critical") 128 | 129 | def get_map_level(self, level="debug"): 130 | level = str(level).lower() 131 | #print "get_map_level:",level 132 | if level == "debug": 133 | return 
logging.DEBUG 134 | if level == "info": 135 | return logging.INFO 136 | if level == "warn": 137 | return logging.WARN 138 | if level == "error": 139 | return logging.ERROR 140 | if level == "critical": 141 | return logging.CRITICAL 142 | 143 | 144 | def init_log(log_path, level=logging.INFO, when="D", backup=7, 145 | format="%(levelname)s: %(asctime)s: %(filename)s:%(lineno)d * %(thread)d %(message)s", 146 | datefmt="%m-%d %H:%M:%S"): 147 | """ 148 | init_log - initialize log module 149 | 150 | Args: 151 | log_path - Log file path prefix. 152 | Log data will go to two files: log_path.log and log_path.log.wf 153 | Any non-exist parent directories will be created automatically 154 | level - msg above the level will be displayed 155 | DEBUG < INFO < WARNING < ERROR < CRITICAL 156 | the default value is logging.INFO 157 | when - how to split the log file by time interval 158 | 'S' : Seconds 159 | 'M' : Minutes 160 | 'H' : Hours 161 | 'D' : Days 162 | 'W' : Week day 163 | default value: 'D' 164 | format - format of the log 165 | default format: 166 | %(levelname)s: %(asctime)s: %(filename)s:%(lineno)d * %(thread)d %(message)s 167 | INFO: 12-09 18:02:42: log.py:40 * 139814749787872 HELLO WORLD 168 | backup - how many backup file to keep 169 | default value: 7 170 | 171 | Raises: 172 | OSError: fail to create log directories 173 | IOError: fail to open log file 174 | """ 175 | formatter = logging.Formatter(format, datefmt) 176 | logger = logging.getLogger() 177 | logger.setLevel(level) 178 | 179 | dir = os.path.dirname(log_path) 180 | if not os.path.isdir(dir): 181 | os.makedirs(dir) 182 | 183 | handler = logging.handlers.TimedRotatingFileHandler(log_path + ".log", 184 | when=when, 185 | backupCount=backup) 186 | handler.setLevel(level) 187 | handler.setFormatter(formatter) 188 | logger.addHandler(handler) 189 | 190 | handler = logging.handlers.TimedRotatingFileHandler(log_path + ".log.wf", 191 | when=when, 192 | backupCount=backup) 193 | handler.setLevel(logging.WARNING) 
194 | handler.setFormatter(formatter) 195 | logger.addHandler(handler) 196 | 197 | 198 | if __name__ == "__main__": 199 | # 通用模块日志 200 | init_log("./log/common.log") 201 | logging.info("info log") 202 | logging.warning("warning log") 203 | logging.error("error log") 204 | logging.debug("debug log") 205 | # 独立模块日志 206 | debug = True 207 | logpath = "./log/test.log" 208 | logger = Log( 209 | logpath, 210 | level="debug", 211 | logid="meetbill", 212 | is_console=debug, 213 | mbs=5, 214 | count=5) 215 | 216 | logstr = "helloworld" 217 | logger.error(logstr) 218 | logger.info(logstr) 219 | logger.warn(logstr) 220 | logger.debug(logstr) 221 | -------------------------------------------------------------------------------- /My_lib/monitor_process/monitor.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding=utf8 3 | """ 4 | # Author: 遇见王斌 5 | # Created Time : 2016-11-11 22:39:02 6 | 7 | # File Name: ww.py 8 | # Description: 9 | 10 | """ 11 | import subprocess 12 | def check(p_name): 13 | cmd = 'ps -ef |grep %s|grep -v "grep"' % p_name 14 | p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE) 15 | if p.wait() == 0: 16 | val = p.stdout.read() 17 | if p_name in val: 18 | print "running" 19 | else: 20 | print "no running" 21 | 22 | 23 | check("unode") 24 | 25 | -------------------------------------------------------------------------------- /My_lib/mpms/demo.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | """ 3 | Do parallel python works easily in multithreads in multiprocesses 4 | 一个简单的多进程-多线程工作框架 5 | 6 | Work model: 7 | A simple task-worker-handler model. 8 | Main threads will continuing adding tasks (task parameters) to task queue. 9 | Many outer workers(in many threads and many processes) would read tasks from queue one by one and work them out, 10 | then put the result(if we have) into the product queue. 
11 | An handler thread in main process will read the products in the product queue(if we have), 12 | and then handle those products. 13 | 14 | Why Multithreads in Multiprocesses? 15 | Many jobs are time-consuming but not very cpu-consuming (such as web fetching), 16 | due to python's GIL,we cannot use multi-cores in single processes, 17 | one process is able to handle 50-80 threads, 18 | but can never execute 1000 or 2000 threads, 19 | so a stupid but workable way is put those jobs in many threads in many processes 20 | 21 | 工作模型: 22 | 主线程不断向队列中添加任务参数 23 | 外部进程的大量线程(工作函数)不断从任务队列中读取参数,并行执行后将结果加入到结果队列 24 | 主线程中新开一个处理线程,不断从结果队列读取并依此处理 25 | 26 | Due to many threads, some time-consuming tasks would finish much faster than single threads 27 | 可以显著提升某些长时间等待的工作的效率,如网络访问 28 | 29 | # Win10 x64, python3.5.1 32bit, Intel I7 with 4 cores 8 threads 30 | Processes:20 Threads_per_process:50 Total_threads:1000 TotalTime: 0.7728791236877441 31 | Processes:10 Threads_per_process:20 Total_threads:200 TotalTime: 2.1930654048919678 32 | Processes:5 Threads_per_process:10 Total_threads:50 TotalTime: 8.134965896606445 33 | Processes:3 Threads_per_process:3 Total_threads:9 TotalTime: 44.83632779121399 34 | Processes:1 Threads_per_process:1 Total_threads:1 TotalTime: 401.3383722305298 35 | """ 36 | from __future__ import unicode_literals, print_function 37 | from time import time, sleep 38 | 39 | from mylib.mpms import MPMS, Meta 40 | 41 | 42 | def worker(index, t=None): 43 | """ 44 | Worker function, accept task parameters and do actual work 45 | should be able to accept at least one arg 46 | ALWAYS works in external thread in external process 47 | 48 | 工作函数,接受任务参数,并进行实际的工作 49 | 总是工作在外部进程的线程中 (即不工作在主进程中) 50 | """ 51 | sleep(0.2) # delay 0.2 second 52 | print(index, t) 53 | 54 | # worker's return value will be added to product queue, waiting handler to handle 55 | # you can return any type here (Included the None , of course) 56 | # worker函数的返回值会被加入到队列中,供handler依次处理,返回值允许除了 StopIteration 
以外的任何类型 57 | return index, "hello world" 58 | 59 | 60 | # noinspection PyStatementEffect 61 | def collector(meta, result): 62 | """ 63 | Accept and handle worker's product 64 | It must have at least one arg, because any function in python will return value (maybe None) 65 | It is running in single thread in the main process, 66 | if you want to have multi-threads handler, you can simply pass it's arg(s) to another working queue 67 | 68 | 接受并处理worker给出的product 69 | handler总是单线程的,运行时会在主进程中新开一个handler线程 70 | 如果需要多线程handler,可以新建第二个多线程实例然后把它接收到的参数传入第二个实例的工作队列 71 | handler必须能接受worker给出的参数 72 | 即使worker无显示返回值(即没有return)也应该写一个参数来接收None 73 | 74 | Args: 75 | meta (Meta): meta信息, 详见 Meta 的docstring 76 | result (Any|Exception): 77 | worker的返回值, 若worker出错, 则返回对应的 Exception 78 | """ 79 | if isinstance(result, Exception): 80 | return 81 | index, t = result 82 | print("received", index, t, time()) 83 | meta.taskid, meta.args, meta.kwargs # 分别为此任务的 taskid 和 传入的 args kwargs 84 | meta['want'] # 在 main 中传入的meta字典中的参数 85 | meta.mpms # meta.mpms 中保存的是当前的 MPMS 实例 86 | 87 | 88 | def main(): 89 | results = "" 90 | # we will run the benchmarks several times using the following params 91 | # 下面这些值用于多次运行,看时间 92 | test_params = ( 93 | # (processes, threads_per_process) 94 | (10, 20), 95 | (5, 10), 96 | (1, 3) 97 | ) 98 | for processes, threads_per_process in test_params: 99 | # Init the poll # 初始化 100 | m = MPMS( 101 | worker, 102 | collector, 103 | processes=processes, # optional, how many processes, default value is your cpu core number 104 | threads=threads_per_process, # optional, how many threads per process, default is 2 105 | meta={"any": 1, "dict": "you", "want": {"pass": "to"}, "worker": 0.5}, 106 | ) 107 | m.start() # start and fork subprocess 108 | start_time = time() # when we started # 记录开始时间 109 | 110 | # put task parameters into the task queue, 2000 total tasks 111 | # 把任务加入任务队列,一共2000次 112 | for i in range(200): 113 | m.put(i, t=time()) 114 | 115 | # optional, close the task 
queue. queue will be auto closed when join() 116 | # 关闭任务队列,可选. 在join()的时候会自动关闭 117 | # m.close() 118 | 119 | # close task queue and wait all workers and handler to finish 120 | # 等待全部任务及全部结果处理完成 121 | m.join() 122 | 123 | # write and print records 124 | # 下面只是记录和打印结果 125 | results += "Processes:" + str(processes) + " Threads_per_process:" + str(threads_per_process) \ 126 | + " Total_threads:" + str(processes * threads_per_process) \ 127 | + " TotalTime: " + str(time() - start_time) + "\n" 128 | print(results) 129 | 130 | print('sleeping 5s before next') 131 | sleep(5) 132 | 133 | 134 | if __name__ == '__main__': 135 | main() 136 | -------------------------------------------------------------------------------- /My_lib/mpms/mylib/BLog.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding=utf8 3 | """ 4 | # Author: Bill 5 | # Created Time : 2016-08-01 10:59:26 6 | 7 | # File Name: w.py 8 | # Description: 9 | 10 | """ 11 | import sys 12 | import logging 13 | from logging.handlers import RotatingFileHandler 14 | import os 15 | 16 | class ColoredFormatter(logging.Formatter): 17 | '''A colorful formatter.''' 18 | 19 | def __init__(self, fmt = None, datefmt = None): 20 | logging.Formatter.__init__(self, fmt, datefmt) 21 | # Color escape string 22 | COLOR_RED='\033[1;31m' 23 | COLOR_GREEN='\033[1;32m' 24 | COLOR_YELLOW='\033[1;33m' 25 | COLOR_BLUE='\033[1;34m' 26 | COLOR_PURPLE='\033[1;35m' 27 | COLOR_CYAN='\033[1;36m' 28 | COLOR_GRAY='\033[1;37m' 29 | COLOR_WHITE='\033[1;38m' 30 | COLOR_RESET='\033[1;0m' 31 | 32 | # Define log color 33 | self.LOG_COLORS = { 34 | 'DEBUG': '%s', 35 | 'INFO': COLOR_GREEN + '%s' + COLOR_RESET, 36 | 'WARNING': COLOR_YELLOW + '%s' + COLOR_RESET, 37 | 'ERROR': COLOR_RED + '%s' + COLOR_RESET, 38 | 'CRITICAL': COLOR_RED + '%s' + COLOR_RESET, 39 | 'EXCEPTION': COLOR_RED + '%s' + COLOR_RESET, 40 | } 41 | 42 | def format(self, record): 43 | level_name = record.levelname 44 | msg = 
class Log(object):
    """Rotating-file logger (with optional colored console output on stderr).

    Thin wrapper over the stdlib logging module: one named logger gets a
    RotatingFileHandler and, optionally, a ColoredFormatter stream handler.
    Construction and logging are best-effort: exceptions are printed, never
    propagated to the caller.
    """

    # level name -> logging constant; lookups fall back to DEBUG for
    # unknown names (the old implementation returned None, which made
    # tolog() silently drop every message).
    _LEVELS = {
        "debug": logging.DEBUG,
        "info": logging.INFO,
        "warn": logging.WARN,
        "error": logging.ERROR,
        "critical": logging.CRITICAL,
    }

    def __init__(self, filename, level="debug", logid="qiueer", mbs=20, count=10, is_console=True):
        """
        Args:
            filename: log file path; missing parent directories are created
            level: default level name ("debug"/"info"/"warn"/"error"/"critical")
            logid: name passed to logging.getLogger()
            mbs: max size of one log file in MB before rotation
            count: how many rotated backup files to keep
            is_console: when True, also emit colored records to stderr
        """
        try:
            self._level = level
            self._filename = filename
            self._logid = logid
            self._logger = logging.getLogger(self._logid)

            file_path = os.path.split(self._filename)[0]
            # Guard against a bare filename: os.path.split("x.log")[0] is ""
            # and os.makedirs("") raises OSError.
            if file_path and not os.path.exists(file_path):
                os.makedirs(file_path)

            # Attach handlers only once, even if the same logid is reused.
            if not self._logger.handlers:
                self._logger.setLevel(self.get_map_level(self._level))

                fmt = '[%(asctime)s] %(levelname)s\n%(message)s'
                datefmt = '%Y-%m-%d %H:%M:%S'
                formatter = logging.Formatter(fmt, datefmt)

                max_bytes = int(mbs) * 1024 * 1024
                file_handler = RotatingFileHandler(
                    self._filename, mode='a', maxBytes=max_bytes, backupCount=count)
                file_handler.setFormatter(formatter)
                self._logger.addHandler(file_handler)

                if is_console:
                    stream_handler = logging.StreamHandler(sys.stderr)
                    console_formatter = ColoredFormatter(fmt, datefmt)
                    stream_handler.setFormatter(console_formatter)
                    self._logger.addHandler(stream_handler)
        except Exception as expt:
            # best-effort: never let logger construction kill the caller
            print(expt)

    def tolog(self, msg, level=None):
        """Write msg at the given level name (defaults to the ctor level)."""
        try:
            level_name = str(level if level else self._level).lower()
            # logger.log(numeric, msg) is equivalent to the per-level
            # debug()/info()/... calls the old if-chain dispatched to.
            self._logger.log(self.get_map_level(level_name), msg)
        except Exception as expt:
            print(expt)

    def debug(self, msg):
        self.tolog(msg, level="debug")

    def info(self, msg):
        self.tolog(msg, level="info")

    def warn(self, msg):
        self.tolog(msg, level="warn")

    def error(self, msg):
        self.tolog(msg, level="error")

    def critical(self, msg):
        self.tolog(msg, level="critical")

    def get_map_level(self, level="debug"):
        """Map a level name to its logging constant; unknown names -> DEBUG."""
        return self._LEVELS.get(str(level).lower(), logging.DEBUG)


if __name__ == "__main__":
    debug = False
    logpath = "/tmp/test.log"
    logger = Log(logpath, level="debug", is_console=debug, mbs=5, count=5)

    logstr = "helloworld"
    logger.error(logstr)
    logger.info(logstr)
    logger.warn(logstr)
# -*- coding:utf-8 -*-
"""Small socket helpers: a SIGALRM-based timeout decorator, length-prefixed
send helpers with retry/failover, and the per-connection STATE object used by
the NetBase state machine."""
from inspect import currentframe
import socket
import select
import time
import signal
import functools

DEBUG = False


def get_linenumber():
    """Return (as a string) the line number of the caller's caller.

    Two frames up because this is only ever called through dbgPrint().
    """
    cf = currentframe()
    return str(cf.f_back.f_back.f_lineno)


def dbgPrint(msg):
    """Print msg prefixed with the calling line number when DEBUG is on."""
    if DEBUG:
        print("%s %s" % (get_linenumber(), msg))


class TimeoutError(Exception):
    """Raised when a @timeout-decorated call exceeds its time budget."""
    pass


def timeout(seconds, error_message="function call time out"):
    """Decorator: abort the wrapped call with TimeoutError after `seconds`.

    Implemented with SIGALRM, so it only works on Unix and only in the
    main thread.
    """
    def decorated(func):
        def _handle_timeout(signum, frame):
            raise TimeoutError(error_message)

        def wrapper(*args, **kwargs):
            signal.signal(signal.SIGALRM, _handle_timeout)
            signal.alarm(seconds)
            try:
                result = func(*args, **kwargs)
            finally:
                # BUGFIX: this was signal.alarm(10), which re-armed a stray
                # alarm that would raise TimeoutError 10 seconds after the
                # call returned. alarm(0) cancels the pending alarm.
                signal.alarm(0)
            return result
        return functools.wraps(func)(wrapper)
    return decorated


@timeout(5)
def connect_timeout(socket, host_port):
    """connect() with a 5 second hard limit.

    NOTE(review): the `socket` parameter shadows the socket module; kept for
    backward compatibility with positional/keyword callers.
    """
    return socket.connect(host_port)


def sendData_mh(sock_list, host_list, data, single_host_retry=3):
    """Send length-prefixed `data` to the first reachable host in host_list.

    Args:
        sock_list: one-element list holding a (possibly None) cached socket;
                   a list so the caller sees reconnects
        host_list: ["host1:port", "host2:port", ...] tried in order
        data: payload string; wire format is "%010d" length prefix + payload
        single_host_retry: attempts per host before failing over
    """
    done = False
    for host_port in host_list:
        if done:
            break
        host, port = host_port.split(":")
        port = int(port)
        retry = 0
        while retry < single_host_retry:
            try:
                if sock_list[0] is None:
                    sock_list[0] = socket.socket(
                        socket.AF_INET, socket.SOCK_STREAM)
                    sock_list[0].settimeout(5)
                    sock_list[0].connect((host, port))
                d = data
                sock_list[0].sendall("%010d%s" % (len(d), d))
                count = sock_list[0].recv(10)
                if not count:
                    raise Exception("recv error")
                count = int(count)
                buf = sock_list[0].recv(count)
                if buf[:2] == "OK":
                    retry = 0
                    # BUGFIX: `done` was never set, so after a successful
                    # send the outer loop went on to send to every remaining
                    # host instead of stopping at the first success.
                    done = True
                    break
            except BaseException:
                sock_list[0].close()
                sock_list[0] = None
                retry += 1


def sendData(sock_l, host, port, data):
    """Send length-prefixed `data` to a single host, reconnecting up to 3 times."""
    retry = 0
    while retry < 3:
        try:
            if sock_l[0] is None:
                sock_l[0] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                sock_l[0].connect((host, port))
                dbgPrint("\n-- start connect %s:%d" % (host, port))
            d = data
            sock_l[0].sendall("%010d%s" % (len(data), data))
            count = sock_l[0].recv(10)
            if not count:
                raise Exception("recv error")
            buf = sock_l[0].recv(int(count))
            dbgPrint("recv data: %s" % buf)
            if buf[:2] == "OK":
                retry = 0
                break
        except BaseException:
            sock_l[0].close()
            sock_l[0] = None
            retry += 1


class STATE:
    """Per-connection state for the read/write state machine.

    need_read starts at 10 because the protocol prefixes every payload with
    a 10-digit length field.
    """
    def __init__(self):
        self.state = "accept"
        self.have_read = 0
        self.need_read = 10
        self.have_write = 0
        self.need_write = 0
        self.buff_write = ""
        self.buff_read = ""
        # sock_obj holds the connection's socket object once accepted
        self.sock_obj = ""

    def printState(self):
        """Dump all fields via dbgPrint (no-op unless DEBUG is on)."""
        if DEBUG:
            dbgPrint('\n - current state of fd: %d' % self.sock_obj.fileno())
            dbgPrint(" - - state: %s" % self.state)
            dbgPrint(" - - have_read: %s" % self.have_read)
            dbgPrint(" - - need_read: %s" % self.need_read)
            dbgPrint(" - - have_write: %s" % self.have_write)
            dbgPrint(" - - need_write: %s" % self.need_write)
            dbgPrint(" - - buff_write: %s" % self.buff_write)
            dbgPrint(" - - buff_read: %s" % self.buff_read)
            dbgPrint(" - - sock_obj: %s" % self.sock_obj)


if __name__ == "__main__":
    import json
    import time
    try:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock_l = [s]
        s.connect(("127.0.0.1", 9999))
    except socket.error as err:
        print(err)
    while True:
        print("send data to %s %s" % ("127.0.0.1", "9999"))
        data = "hello world"
        sendData(sock_l, '127.0.0.1', '9999', json.dumps(data))
        time.sleep(5)
#!/usr/bin/python
# coding=utf8
"""
# Author: meetbill
# Created Time : 2019-08-15 23:05:56

# File Name: test_user.py
# Description: peewee ORM CRUD demo against a MySQL `tb_user` table.
"""
from faker import Factory
from datetime import datetime
import hashlib

import xlib.db
from xlib.db.peewee import *

# Create an instance of a Database (connection pool via the mysql+pool URL).
mysql_config_url = "mysql+pool://root:123456@127.0.0.1:3306/test?max_connections=300&stale_timeout=300"
db = xlib.db.connect(url=mysql_config_url)


class User(Model):
    """User row model.

    If none of the fields are initialized with primary_key=True, an
    auto-incrementing primary key is automatically created and named 'id'.
    """
    id = PrimaryKeyField()
    email = CharField(index=True, max_length=64)
    username = CharField(unique=True, max_length=32)
    password = CharField(null=True, max_length=64)
    createTime = DateTimeField(column_name="create_time", default=datetime.now)
    role = CharField(null=False, max_length=64, default="")

    class Meta:
        database = db
        table_name = 'tb_user'
        # For models without a primary key, set: primary_key = False

    def __str__(self):
        return "User(id:{} email:{} username:{} password:{} createTime: {})".format(
            self.id, self.email, self.username, self.password, self.createTime)

    @staticmethod
    def create_password(raw):
        """Hash a plaintext password for storage.

        NOTE(review): unsalted md5 is NOT safe for real password storage;
        kept only because this is a demo and callers compare against it.
        """
        return hashlib.new("md5", raw).hexdigest()

    def check_password(self, raw):
        """Return True when `raw` hashes to the stored password."""
        return hashlib.new("md5", raw).hexdigest() == self.password


db.connect()
db.drop_tables([User])
db.create_tables([User])

""" CREATE """
print("-------------CREATE")

# Create a User storing the md5 of the plaintext password.
password = '111111'
user = User.create(email="meetbill@163.com", username="meetbill",
                   password=User.create_password(password), role="admin")

# Use faker to generate several users and bulk-insert them in one query.
faker = Factory.create()
fake_users = [{
    'username': faker.name(),
    'password': faker.word(),
    'email': faker.email(),
} for i in range(5)]
User.insert_many(fake_users).execute()

""" RETRIEVE/GET/FIND """
print("-------------RETRIEVE/GET/FIND")
user = User.select().where(User.id == 1).get()
if user.check_password(password):
    # was a python2-only `print "..."` statement in an otherwise
    # print()-style file; print(...) runs on both 2 and 3
    print("check password OK")

user = User.select().where(User.id != 1).get()
print(user)
user = User.select().where(User.username.contains("meet")).get()
print(user)
count = User.select().filter(User.id >= 3).count()
print(count)
users = User.select().order_by(User.email)
for u in users:
    print(u)

""" UPDATE """
print("-------------UPDATE")

effect_count = User.update({User.username: "lisi", User.email: "ls@163.com"}).where(User.id == 1).execute()
print(effect_count)

""" DELETE """
print("-------------DELETE")

effect_count = User().delete_by_id(6)
print(effect_count)
effect_count = User.delete().where(User.id >= 4).execute()
print(effect_count)
class Progressbar(object):
    """50-character-wide terminal progress bar.

    Each block_char printed represents 2% of `total`; the bar only ever
    advances (already-printed blocks are never repainted).
    """

    def __init__(self, total, block_char='#'):
        self.finalcount = total        # the count value that means 100%
        self.blockcount = 0            # block chars printed so far
        self.block_char = block_char
        self.cur_num = 0               # accumulated progress fed via update()

        # stdout is captured here on purpose; header printed once
        self.f = sys.stdout
        if not self.finalcount:
            return
        self.f.write('\n------------------%Progress-----------------------\n')
        self.f.write('    1    2    3    4    5    6    7    8    9    0\n')
        self.f.write('----0----0----0----0----0----0----0----0----0----0\n')

    def progress(self, count):
        """Advance the bar to absolute progress `count` (clamped to total)."""
        count = min(count, self.finalcount)
        if self.finalcount:
            # round() gives the nearest whole percent; show at least 1%
            percentcomplete = int(round(100.0 * count / self.finalcount))
            if percentcomplete < 1:
                percentcomplete = 1
        else:
            # a zero total is treated as already complete
            percentcomplete = 100

        # 50 blocks == 100%, i.e. one block per 2% (floor division; the old
        # comment wrongly described '//' as float division)
        blockcount = int(percentcomplete // 2)

        # only move forward: never repaint blocks already on screen
        if blockcount <= self.blockcount:
            return
        for _ in range(self.blockcount, blockcount):
            self.f.write("%s" % (self.block_char))
            self.f.flush()
        self.blockcount = blockcount
        if percentcomplete == 100:
            self.f.write("\n")

    def update(self, num):
        """Add `num` to the accumulated progress and redraw.

        Args:
            num: increment to add to the running total
        """
        self.cur_num = self.cur_num + num
        self.progress(self.cur_num)


if __name__ == "__main__":
    from time import sleep
    pb = Progressbar(total=150)
    for _ in range(15):
        sleep(0.2)
        pb.update(10)
PyMySQL* 39 | $ python setup.py install 40 | ``` 41 | 42 | ## 2 连接数据库 43 | 44 | ```python 45 | import pymysql 46 | 47 | # 打开数据库连接 48 | db = pymysql.connect("localhost","root","root","test" ) 49 | 50 | # 使用 cursor() 方法创建一个游标对象 cursor 51 | cursor = db.cursor() 52 | 53 | # 使用 execute() 方法执行 SQL 查询 54 | cursor.execute("SELECT VERSION()") 55 | 56 | # 使用 fetchone() 方法获取单条数据。 57 | data = cursor.fetchone() 58 | 59 | print ("Database version : %s " % data) 60 | 61 | # 关闭数据库连接 62 | db.close() 63 | ``` 64 | ## 3 数据库操作 65 | ### 3.1 创建数据库表 66 | 67 | ```python 68 | import pymysql 69 | 70 | # 打开数据库连接 71 | db = pymysql.connect("localhost","root","root","test" ) 72 | 73 | # 使用 cursor() 方法创建一个游标对象 cursor 74 | cursor = db.cursor() 75 | 76 | # 使用 execute() 方法执行 SQL,如果表存在则删除 77 | cursor.execute("DROP TABLE IF EXISTS EMPLOYEE") 78 | 79 | # 使用预处理语句创建表 80 | sql = """CREATE TABLE EMPLOYEE ( 81 | FIRST_NAME CHAR(20) NOT NULL, 82 | LAST_NAME CHAR(20), 83 | AGE INT, 84 | SEX CHAR(1), 85 | INCOME FLOAT )""" 86 | 87 | cursor.execute(sql) 88 | 89 | # 关闭数据库连接 90 | db.close() 91 | ``` 92 | 93 | ### 3.2 数据库插入操作 94 | 95 | ```python 96 | import pymysql 97 | 98 | # 打开数据库连接 99 | db = pymysql.connect("localhost","root","root","test" ) 100 | 101 | # 使用 cursor() 方法获取操作游标 102 | cursor = db.cursor() 103 | 104 | # SQL 插入语句 105 | sql = """INSERT INTO EMPLOYEE(FIRST_NAME, 106 | LAST_NAME, AGE, SEX, INCOME) 107 | VALUES ('Mac', 'Mohan', 20, 'M', 2000)""" 108 | try: 109 | # 执行 sql 语句 110 | cursor.execute(sql) 111 | # 提交到数据库执行 112 | db.commit() 113 | except: 114 | # 如果发生错误则回滚 115 | db.rollback() 116 | 117 | # 关闭数据库连接 118 | db.close() 119 | ``` 120 | 121 | ### 3.3 数据库查询操作 122 | 123 | Python 查询 Mysql 使用 fetchone() 方法获取单条数据,使用 fetchall() 方法获取多条数据。 124 | 125 | ```python 126 | import pymysql 127 | 128 | # 打开数据库连接 129 | db = pymysql.connect("localhost","root","root","test" ) 130 | 131 | # 使用 cursor() 方法获取操作游标 132 | cursor = db.cursor() 133 | 134 | # SQL 查询语句 135 | sql = '''SELECT * FROM EMPLOYEE WHERE INCOME > 1000''' 
136 | try: 137 | # 执行 SQL 语句 138 | cursor.execute(sql) 139 | 140 | # 获取所有记录列表 141 | results = cursor.fetchall() 142 | for row in results: 143 | fname = row[0] 144 | lname = row[1] 145 | age = row[2] 146 | sex = row[3] 147 | income = row[4] 148 | # 打印结果 149 | print ("fname=%s,lname=%s,age=%d,sex=%s,income=%d" % \ 150 | (fname, lname, age, sex, income )) 151 | """ 152 | # 执行 SQL 语句,并接收 execute() 返回值 153 | print("影响了 %d 条数据" % cursor.execute(sql)) 154 | # 获取单条记录列表,返回值是元组 155 | print (cursor.fetchone()) 156 | """ 157 | except: 158 | print ("Error: unable to fecth data") 159 | 160 | # 关闭数据库连接 161 | db.close() 162 | ``` 163 | 164 | ### 3.4 数据库更新操作 165 | 166 | 以下实例将 TESTDB 表中的 SEX 字段全部修改为 'M',AGE 字段递增 1 167 | 168 | ```python 169 | import pymysql 170 | 171 | # 打开数据库连接 172 | db = pymysql.connect("localhost","root","root","test" ) 173 | 174 | # 使用 cursor() 方法获取操作游标 175 | cursor = db.cursor() 176 | 177 | # SQL 更新语句 178 | sql = "UPDATE EMPLOYEE SET AGE = AGE + 1 WHERE SEX = '%c'" % ('M') 179 | try: 180 | # 执行 SQL 语句 181 | cursor.execute(sql) 182 | # 提交到数据库执行 183 | db.commit() 184 | except: 185 | # 发生错误时回滚 186 | db.rollback() 187 | 188 | # 关闭数据库连接 189 | db.close() 190 | ``` 191 | 192 | ### 3.5 删除操作 193 | 194 | ``` 195 | import pymysql 196 | 197 | # 打开数据库连接 198 | db = pymysql.connect("localhost","root","root","test" ) 199 | 200 | # 使用 cursor() 方法获取操作游标 201 | cursor = db.cursor() 202 | 203 | # SQL 删除语句 204 | sql = "DELETE FROM EMPLOYEE WHERE AGE > '%d'" % (20) 205 | try: 206 | # 执行 SQL 语句 207 | cursor.execute(sql) 208 | # 提交修改 209 | db.commit() 210 | except: 211 | # 发生错误时回滚 212 | db.rollback() 213 | 214 | # 关闭连接 215 | db.close() 216 | ``` 217 | 218 | ## 4 异常 219 | 事务应该具有 4 个属性:原子性、一致性、隔离性、持久性。这四个属性通常称为 ACID 特性。Python DB API 2.0 的事务提供了两个方法 commit 或 rollback。 220 | 221 | DB API 中定义了一些数据库操作的错误及异常,下表列出了这些错误和异常: 222 | 223 | | 异常 | 描述 | 224 | | ----------------- | ------------------------------------------------------------ | 225 | | Warning | 当有严重警告时触发,例如插入数据是被截断等等。必须是 StandardError 
的子类。 | 226 | | Error | 警告以外所有其他错误类。必须是 StandardError 的子类。 | 227 | | InterfaceError | 当有数据库接口模块本身的错误(而不是数据库的错误)发生时触发。 必须是 Error 的子类。 | 228 | | DatabaseError | 和数据库有关的错误发生时触发。 必须是 Error 的子类。 | 229 | | DataError | 当有数据处理时的错误发生时触发,例如:除零错误,数据超范围等等。 必须是 DatabaseError 的子类。 | 230 | | OperationalError | 指非用户控制的,而是操作数据库时发生的错误。例如:连接意外断开、 数据库名未找到、事务处理失败、内存分配错误等等操作数据库是发生的错误。 必须是 DatabaseError 的子类。 | 231 | | IntegrityError | 完整性相关的错误,例如外键检查失败等。必须是 DatabaseError 子类。 | 232 | | InternalError | 数据库的内部错误,例如游标(cursor)失效了、事务同步失败等等。 必须是 DatabaseError 子类。 | 233 | | ProgrammingError | 程序错误,例如数据表(table)没找到或已存在、SQL 语句语法错误、 参数数量错误等等。必须是 DatabaseError 的子类。 | 234 | | NotSupportedError | 不支持错误,指使用了数据库不支持的函数或 API 等。例如在连接对象上 使用.rollback() 函数,然而数据库并不支持事务或者事务已关闭。 必须是 DatabaseError 的子类。 | 235 | 236 | -------------------------------------------------------------------------------- /My_lib/query_ip/QQWry.Dat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/meetbill/MyPythonLib/fd58b8a112ad366fba67771cafd752f0b8f378ee/My_lib/query_ip/QQWry.Dat -------------------------------------------------------------------------------- /My_lib/query_ip/README.md: -------------------------------------------------------------------------------- 1 | # query_ip 2 | 3 | 根据IP查询地理位置信息 4 | 5 | 早之前使用获取`ip138`接口进行获取某个 IP 的地理位置信息,但目前`ip138`的ip查询接口计划商用化,使用起来就会不太方便 6 | 7 | ## 使用方法 8 | 9 | 命令行下 10 | ``` 11 | #python query_ip.py "114.114.114.114" 12 | ``` 13 | -------------------------------------------------------------------------------- /My_lib/query_ip/query_ip.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # coding: utf-8 3 | 4 | # from: http://linuxtoy.org/files/pyip.py 5 | # Blog: http://linuxtoy.org/archives/python-ip.html 6 | # Modified by Demon 7 | # Blog: http://demon.tw/programming/python-qqwry-dat.html 8 | 9 | '''用Python脚本查询纯真IP库 10 | 11 | QQWry.Dat的格式如下: 12 | 13 | +----------+ 14 | | 
文件头 | (8字节) 15 | +----------+ 16 | | 记录区 | (不定长) 17 | +----------+ 18 | | 索引区 | (大小由文件头决定) 19 | +----------+ 20 | 21 | 文件头:4字节开始索引偏移值+4字节结尾索引偏移值 22 | 23 | 记录区: 每条IP记录格式 ==> IP地址[国家信息][地区信息] 24 | 25 | 对于国家记录,可以有三种表示方式: 26 | 27 | 字符串形式(IP记录第5字节不等于0x01和0x02的情况), 28 | 重定向模式1(第5字节为0x01),则接下来3字节为国家信息存储地的偏移值 29 | 重定向模式(第5字节为0x02), 30 | 31 | 对于地区记录,可以有两种表示方式: 字符串形式和重定向 32 | 33 | 最后一条规则:重定向模式1的国家记录后不能跟地区记录 34 | 35 | 索引区: 每条索引记录格式 ==> 4字节起始IP地址 + 3字节指向IP记录的偏移值 36 | 37 | 索引区的IP和它指向的记录区一条记录中的IP构成一个IP范围。查询信息是这个 38 | 范围内IP的信息 39 | 40 | ''' 41 | 42 | import sys 43 | import socket 44 | from struct import pack, unpack 45 | 46 | class IPInfo(object): 47 | '''QQWry.Dat数据库查询功能集合 48 | ''' 49 | def __init__(self, dbname): 50 | ''' 初始化类,读取数据库内容为一个字符串, 51 | 通过开始8字节确定数据库的索引信息''' 52 | 53 | self.dbname = dbname 54 | # f = file(dbname, 'r') 55 | 56 | # Demon注:在Windows下用'r'会有问题,会把\r\n转换成\n 57 | # 详见http://demon.tw/programming/python-open-mode.html 58 | # 还有Python文档中不提倡用file函数来打开文件,推荐用open 59 | f = open(dbname, 'rb') 60 | 61 | self.img = f.read() 62 | f.close() 63 | 64 | # QQWry.Dat文件的开始8字节是索引信息,前4字节是开始索引的偏移值, 65 | # 后4字节是结束索引的偏移值。 66 | # (self.firstIndex, self.lastIndex) = unpack('II', self.img[:8]) 67 | 68 | # Demon注:unpack默认使用的endian是和机器有关的 69 | # Intel x86和AMD64(x86-64)是little-endian 70 | # Motorola 68000和PowerPC G5是big-endian 71 | # 而纯真数据库全部采用了little-endian字节序 72 | # 所以在某些big-endian的机器上原代码会出错 73 | (self.firstIndex, self.lastIndex) = unpack(' 14 | 15 | * [1 安装](#1-安装) 16 | * [2 导出 RDB 数据到 csv 文件](#2-导出-rdb-数据到-csv-文件) 17 | * [3 把 RDB 文件导出到 JSON](#3-把-rdb-文件导出到-json) 18 | * [4 比较功能](#4-比较功能) 19 | * [5 协议转换功能](#5-协议转换功能) 20 | 21 | 22 | ## 1 安装 23 | 24 | > 地址 25 | ``` 26 | https://github.com/sripathikrishnan/redis-rdb-tools 27 | ``` 28 | > 使用 29 | ``` 30 | cd redis-rdb-tools 31 | cp rdbtools/cli/rdb.py . 
32 | chmod +x rdb.py 33 | ``` 34 | ## 2 导出 RDB 数据到 csv 文件 35 | ``` 36 | cd redis-rdb-tools 37 | ./rdb.py -c memory dump.rdb > result.csv 38 | ``` 39 | csv(Excel) 文件中的列信息如下: 40 | 41 | > * database:数据库编号 42 | > * type:数据结构类型 43 | > * key:k-v 中的 key 44 | > * memory_size(bytes):内存占用(字节数) 45 | > * encoding:编码 46 | > * num_elements:集合元素数 47 | > * len_largest_element:集合中最长的一个元素的长度 48 | > * expiry:过期时间 49 | 50 | > 输出大于 1M 的 key 51 | ``` 52 | python rdb.py -c memory ./dump.rdb --bytes 1024000 53 | ``` 54 | 55 | ## 3 把 RDB 文件导出到 JSON 56 | ``` 57 | cd redis-rdb-tools 58 | ./rdb.py -c json dump.rdb > result.json 59 | ``` 60 | 为了更简洁,文档后边将只使用安装的方式进行举例 61 | 注意,这里是把 RDB 文件中的数据导出,而不是把统计数据导出。 62 | json 结果举例: 63 | 64 | [ 65 | { 66 | "s1": [ 67 | "f", 68 | "b", 69 | "a", 70 | "e", 71 | "d", 72 | "c" 73 | ], 74 | "key2": "value22222", 75 | "list1": [ 76 | "k1", 77 | "k2", 78 | "k333333", 79 | "k4" 80 | ], 81 | "key3": "value3", 82 | "key1": "value1", 83 | "z1": { 84 | "a": 1, 85 | "b": 2, 86 | "c": 3 87 | } 88 | } 89 | ] 90 | 还可以通过例如如下方式进行更细粒度的筛选数据 91 | 92 | > 根据表达式过滤 key 的范围 93 | ``` 94 | rdb -c json --key "user.*" dump.rdb 95 | ``` 96 | > 只导出数据库 2 中的 hash 数据 97 | ``` 98 | rdb -c json --db 2 --type hash --key "a.*" dump.rdb 99 | ``` 100 | ## 4 比较功能 101 | 这个工具提供了简单的针对 diff 做优化的格式化输出功能。 102 | 例如比较 dump1.rdb 和 dump2.rdb,如下操作: 103 | 104 | ``` 105 | # 分别使用 diff 后输出到文件 106 | rdb -c diff dump1.rdb | sort > dump1.txt 107 | rdb -c diff dump2.rdb | sort > dump2.txt 108 | # 使用任意一个 diff 工具进行比较 109 | kdiff3 dump1.txt dump2.txt 110 | 如果文件太大,可以加上—key=regex 等参数进行数据筛选。 111 | ``` 112 | 113 | ## 5 协议转换功能 114 | 把 RDV 文件转换成 redis protocol 115 | 116 | ``` 117 | rdb -c protocol dump.rdb 118 | *4 119 | $4 120 | HSET 121 | $9 122 | users:123 123 | $9 124 | firstname 125 | $8 126 | Sripathi 127 | ``` 128 | 同样,如果只想指定部分数据,可以使用—key 等命令进行过滤。这也为数据分片提供了可能。使用—key 将数据分成多片,通过 redis protocol 格式进行网络传输。 129 | 130 | 导出图形化统计网页功能 131 | 这个功能是原作者留下的彩蛋,相当实用,能够把 RDB 文件中的数据进行统计后,以饼图和柱状图的方式,输出到网页上。 132 | 
由于当时原作者还未将该功能放到安装包里整合成命令,所以需要进到源码目录中去执行这个工具。 133 | ``` 134 | cd redis-rdb-tools/rdbtools/cli/ 135 | python redis_profiler.py /yourpath/dump.rdb > show.html 136 | ``` 137 | 需要注意的是,html 中依赖 google 的 cdn 中的一些工具,暂时需要翻墙才能渲染出来…… 138 | 139 | 在代码中使用 Parser 140 | 通过继承 RdbCallback 类,可以自己通过回调实现更多功能。 141 | 142 | ``` 143 | import sys 144 | from rdbtools import RdbParser, RdbCallback 145 | class MyCallback(RdbCallback) : 146 | ''' Simple example to show how callback works. 147 | See RdbCallback for all available callback methods. 148 | See JsonCallback for a concrete example 149 | ''' 150 | def set(self, key, value, expiry): 151 | print('%s = %s' % (str(key), str(value))) 152 | def hset(self, key, field, value): 153 | print('%s.%s = %s' % (str(key), str(field), str(value))) 154 | def sadd(self, key, member): 155 | print('%s has {%s}' % (str(key), str(member))) 156 | def rpush(self, key, value) : 157 | print('%s has [%s]' % (str(key), str(value))) 158 | def zadd(self, key, score, member): 159 | print('%s has {%s : %s}' % (str(key), str(member), str(score))) 160 | callback = MyCallback() 161 | parser = RdbParser(callback) 162 | parser.parse('dump.rdb') 163 | ``` 164 | -------------------------------------------------------------------------------- /My_lib/schema/README.md: -------------------------------------------------------------------------------- 1 | ## schema 2 | 3 | 4 | * [1 功能](#1-功能) 5 | * [2 使用方法](#2-使用方法) 6 | * [2.1 检查数字](#21-检查数字) 7 | * [2.2 检查字符串](#22-检查字符串) 8 | * [2.3 检查字典](#23-检查字典) 9 | * [2.4 检查列表中字典](#24-检查列表中字典) 10 | * [3 管理此目录](#3-管理此目录) 11 | * [4 FAQs](#4-faqs) 12 | * [4.1 Schema 传入字典很好用,但是我有的数据是可选的,也就是说有的 key 可以不提供怎么办?](#41-schema-传入字典很好用但是我有的数据是可选的也就是说有的-key-可以不提供怎么办) 13 | * [4.2 我想让 Schema 只验证传入字典中的一部分数据,可以有多余的 key 但是不要抱错,怎么做?](#42-我想让-schema-只验证传入字典中的一部分数据可以有多余的-key-但是不要抱错怎么做) 14 | * [4.3 Schema 抛出的异常信息不是很友好,我想自定义错误信息,怎么办?](#43-schema-抛出的异常信息不是很友好我想自定义错误信息怎么办) 15 | 16 | 17 | 18 | ## 1 功能 19 | 20 | 参数校验 21 | 22 | > * 
[原项目地址](https://github.com/keleshev/schema) 23 | 24 | ## 2 使用方法 25 | 26 | ### 2.1 检查数字 27 | ``` 28 | import schema 29 | 30 | # 检查数字 31 | print '----------------------int' 32 | print schema.Schema(int).validate(123) 33 | print schema.Schema(int).is_valid(123) 34 | ``` 35 | ### 2.2 检查字符串 36 | ``` 37 | # 检查字符串 38 | print '----------------------str' 39 | # Regex 没有 is_valid 方法 40 | print schema.Regex(r'^foo').validate('foobar') 41 | print schema.Schema(lambda n: "foo" in n).is_valid('foobar') 42 | print 'False:%s ' % schema.Schema(lambda n: "foo" in n).is_valid('fobar') 43 | 44 | ``` 45 | ### 2.3 检查字典 46 | ``` 47 | 48 | # 检查字典 49 | print '----------------------dict' 50 | rules = { 51 | 'name': schema.And(str, len), 52 | 'age': schema.And(schema.Use(int), lambda n: 18 <= n <= 99), 53 | schema.Optional('gender'): schema.And(str, schema.Use(str.lower),lambda s: s in ('squid', 'kid'))} 54 | 55 | data = {'name': 'Sue', 'age': '28', 'gender': 'Squid'} 56 | 57 | print schema.Schema(rules).validate(data) 58 | print schema.Schema(rules).is_valid(data) 59 | 60 | ``` 61 | ### 2.4 检查列表中字典 62 | ``` 63 | print '----------------------list-dict' 64 | rules = [{ 65 | 'name': schema.And(str, len), 66 | 'age': schema.And(schema.Use(int), lambda n: 18 <= n <= 99), 67 | schema.Optional('gender'): schema.And(str, schema.Use(str.lower),lambda s: s in ('squid', 'kid'))}] 68 | 69 | data = [{'name': 'Sue', 'age': '28', 'gender': 'Squid'}, 70 | {'name': 'Sam', 'age': '42'}, 71 | {'name': 'Sacha', 'age': '20', 'gender': 'KID'}] 72 | 73 | print schema.Schema(rules).validate(data) 74 | print schema.Schema(rules).is_valid(data) 75 | ``` 76 | ## 3 管理此目录 77 | 78 | > * https://github.com/keleshev/schema 79 | > * 将原项目中的 schema.py 重命名为 `schema/__init__.py` 80 | 81 | ## 4 FAQs 82 | 83 | ### 4.1 Schema 传入字典很好用,但是我有的数据是可选的,也就是说有的 key 可以不提供怎么办? 
84 | ``` 85 | from schema import Optional, Schema 86 | 87 | 88 | Schema({'name': str, Optional('age'): int}).validate({'name': 'foobar'}) 89 | {'name': 'foobar'} 90 | Schema({'name': str, Optional('age', default=18): int}).validate({'name': 'foobar'}) 91 | {'age': 18, 'name': 'foobar'} 92 | ``` 93 | ### 4.2 我想让 Schema 只验证传入字典中的一部分数据,可以有多余的 key 但是不要抱错,怎么做? 94 | ``` 95 | Schema({'name': str, 'age': int}, ignore_extra_keys=True).validate({'name': 'foobar', 'age': 100, 'sex': 'male'}) 96 | {'age': 100, 'name': 'foobar'} 97 | ``` 98 | ### 4.3 Schema 抛出的异常信息不是很友好,我想自定义错误信息,怎么办? 99 | Schema 自带的类(Use、And、Or、Regex、Schema 等)都有一个参数 error,可以自定义错误信息 100 | ``` 101 | Schema({'name': str, 'age': Use(int, error='年龄必须是整数')}).validate({'name': 'foobar', 'age': 'abc'}) 102 | SchemaError: 年龄必须是整数 103 | ``` 104 | 如 105 | ``` 106 | { 107 | schema.Optional('region'): schema.And(lambda n: n in ["bj", "gz"], error="region mast in bj/gz"), 108 | } 109 | ``` 110 | -------------------------------------------------------------------------------- /My_lib/schema/demo.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding=utf8 3 | """ 4 | # Author: meetbill 5 | # Created Time : 2018-12-16 17:29:44 6 | 7 | # File Name: demo.py 8 | # Description: 9 | 10 | """ 11 | import schema 12 | 13 | # 检查数字 14 | print '----------------------int' 15 | print schema.Schema(int).validate(123) 16 | print schema.Schema(int).is_valid(123) 17 | 18 | # 检查字符串 19 | print '----------------------str' 20 | # Regex 没有 is_valid 方法 21 | print schema.Regex(r'^foo').validate('foobar') 22 | print schema.Schema(lambda n: "foo" in n).is_valid('foobar') 23 | print 'False:%s ' % schema.Schema(lambda n: "foo" in n).is_valid('fobar') 24 | 25 | # 检查字典 26 | print '----------------------dict' 27 | rules = { 28 | 'name': schema.And(str, len), 29 | 'age': schema.And(schema.Use(int), lambda n: 18 <= n <= 99), 30 | schema.Optional('gender'): schema.And(str, 
schema.Use(str.lower),lambda s: s in ('squid', 'kid'))} 31 | 32 | data = {'name': 'Sue', 'age': '28', 'gender': 'Squid'} 33 | 34 | print schema.Schema(rules).validate(data) 35 | print schema.Schema(rules).is_valid(data) 36 | 37 | print '----------------------list-dict' 38 | rules = [{ 39 | 'name': schema.And(str, len), 40 | 'age': schema.And(schema.Use(int), lambda n: 18 <= n <= 99), 41 | schema.Optional('gender'): schema.And(str, schema.Use(str.lower),lambda s: s in ('squid', 'kid'))}] 42 | 43 | data = [{'name': 'Sue', 'age': '28', 'gender': 'Squid'}, 44 | {'name': 'Sam', 'age': '42'}, 45 | {'name': 'Sacha', 'age': '20', 'gender': 'KID'}] 46 | 47 | print schema.Schema(rules).validate(data) 48 | print schema.Schema(rules).is_valid(data) 49 | -------------------------------------------------------------------------------- /My_lib/serverinfo_config/README.md: -------------------------------------------------------------------------------- 1 | # sc_si 2 | 3 | ## si.py 4 | 5 | si.py 可获取linux系统信息,可以获得如下信息 6 | 7 | + CPU 8 | + network(网卡和dns信息) 9 | + disk 10 | + memory 11 | + 磁盘挂载点 12 | 13 | ## sc.py 14 | 15 | sc.py 可以进行修改linux配置文件,可以修改以下参数 16 | 17 | + 网卡配置 18 | + dns信息 19 | 20 | [详细信息](https://github.com/BillWang139967/MyPythonLib/wiki/si_sc) 21 | -------------------------------------------------------------------------------- /My_lib/ttable/README.md: -------------------------------------------------------------------------------- 1 | ## 终端表格(ttable) 2 | 3 | 4 | * [效果展示](#效果展示) 5 | * [使用](#使用) 6 | * [表格中内容颜色](#表格中内容颜色) 7 | * [表格中对齐方式](#表格中对齐方式) 8 | * [设置默认对齐方式](#设置默认对齐方式) 9 | * [循环输出表格内容](#循环输出表格内容) 10 | * [其他](#其他) 11 | 12 | 13 | 14 | ## 效果展示 15 | 16 | ``` 17 | +-----------------------------------------------------------------------------------------------------------------+ 18 | | ttable with complicated header | 19 | +--------------------------------+--------------------------------------------------------------------------------+ 20 | | | I/O stats last min | 21 | | 
info +---------------------+-----------------------------------+----------------------+ 22 | | | transfer | max time | # of ops | 23 | +-------------+------------------+----------+----------+-----------+----------+------------+------+-------+-------+ 24 | | IP | last error | read | write | read | write | fsync | read | write | fsync | 25 | +-------------+------------------+----------+----------+-----------+----------+------------+------+-------+-------+ 26 | | 192.168.1.1 | no errors | 19 MiB/s | 27 MiB/s | 263625 us | 43116 us | 262545 us | 3837 | 3295 | 401 | 27 | | 192.168.1.2 | no errors | 25 MiB/s | 29 MiB/s | 340303 us | 89168 us | 223610 us | 2487 | 2593 | 366 | 28 | | 192.168.1.3 | 2012-10-12 07:27 | - | - | - | - | - | - | - | - | 29 | | 192.168.1.4 | no errors | - | - | - | - | - | - | - | - | 30 | | 192.168.1.5 | no errors | 17 MiB/s | 11 MiB/s | 417292 us | 76333 us | 1171903 us | 2299 | 2730 | 149 | 31 | +-------------+------------------+----------+----------+-----------+----------+------------+------+-------+-------+ 32 | ``` 33 | 34 | ## 使用 35 | 36 | ``` 37 | x = ttable("Test title",4) 38 | x.header("column1","column2","column3","column4") 39 | x.append("t1","t2","very long entry","test") 40 | x.append(("r","r3"),("l","l2"),"also long entry","test") 41 | print x 42 | ``` 43 | 44 | ### 表格中内容颜色 45 | 46 | * 白色----------0 47 | * 红色----------1 48 | * 橘黄色--------2 49 | * 黄色----------3 50 | * 绿色----------4 51 | * 青色----------5 52 | * 蓝色----------6 53 | * 品红----------7 54 | * 灰色----------8 55 | 56 | ``` 57 | ("txt","3"),其中的数字则为颜色 58 | ``` 59 | ### 表格中对齐方式 60 | 61 | * l 靠左 62 | * r 靠右 63 | * c 居中 64 | ``` 65 | ("txt","l") 66 | ``` 67 | #### 设置默认对齐方式 68 | 69 | ``` 70 | x.defattr("l","l") 71 | ``` 72 | ### 循环输出表格内容 73 | 74 | ``` 75 | x = ttable("host info",2) 76 | x.defattr("l","l") 77 | x.header("host_group","host") 78 | elements={ 79 | "host_group1":["127.0.0.1","127.0.0.2","127.0.0.3"], 80 | "host_group2":["127.0.0.1","127.0.0.2"] 81 | } 82 | lastpos = 
len(elements) - 1 83 | for i, host_group in enumerate(elements): 84 | ip_list = elements[host_group] 85 | for j,ip in enumerate(ip_list): 86 | # 填充数据 87 | if j == 0: 88 | x.append(host_group,ip) 89 | else: 90 | x.append("",ip) 91 | # 分割线 92 | if j < (len(ip_list)-1): 93 | x.append("", "---") 94 | # 分割线 95 | if i < lastpos: 96 | x.append("---", "---") 97 | print x 98 | ``` 99 | 如上 100 | 101 | * 填充数据的部分在最里面的循环中,因为第一行的内容和其他内容不同,所以需要判断是否是第一行并填充不同数据 102 | * 为防止最后一条分割线与下个表格的分割线重复,则通过判断是否为循环的最后一个元素,最后一个元素则不输出分割线 103 | ## 其他 104 | 105 | 其他可以直接执行 `table.py` 查看效果 106 | -------------------------------------------------------------------------------- /My_lib/validator/README.md: -------------------------------------------------------------------------------- 1 | ## validator 2 | 3 | 4 | 5 | 6 | * [1 功能](#1-功能) 7 | * [2 同类软件](#2-同类软件) 8 | * [3 使用方法](#3-使用方法) 9 | * [4 方法说明](#4-方法说明) 10 | 11 | 12 | 13 | ## 1 功能 14 | 15 | 参数校验 16 | 17 | [validator](https://github.com/mansam/validator.py) 18 | 19 | ## 2 同类软件 20 | 21 | > * https://github.com/keleshev/schema 22 | > * https://github.com/kvesteri/validators 23 | 24 | ## 3 使用方法 25 | 26 | ``` 27 | from validator import Required, Not, Truthy, Blank, Range, Equals, In, validate 28 | 29 | # let's say that my dictionary needs to meet the following rules... 30 | rules = { 31 | "foo": [Required, Equals(123)], 32 | "bar": [Required, Truthy()], 33 | "baz": [In(["spam", "eggs", "bacon"])], 34 | "qux": [Not(Range(1, 100))] # by default, Range is inclusive 35 | } 36 | 37 | # then this following dict would pass: 38 | passes = { 39 | "foo": 123, 40 | "bar": True, # or a non-empty string, or a non-zero int, etc... 41 | "baz": "spam", 42 | "qux": 101 43 | } 44 | print validate(rules, passes) 45 | # (True, {}) 46 | 47 | # but this one would fail 48 | fails = { 49 | "foo": 321, 50 | "bar": False, # or 0, or [], or an empty string, etc... 
51 | "baz": "barf", 52 | "qux": 99 53 | } 54 | print validate(rules, fails) 55 | # (False, 56 | # { 57 | # 'foo': ["must be equal to '123'"], 58 | # 'bar': ['must be True-equivalent value'], 59 | # 'baz': ["must be one of ['spam', 'eggs', 'bacon']"], 60 | # 'qux': ['must not fall between 1 and 100'] 61 | # }) 62 | ``` 63 | 64 | ## 4 方法说明 65 | 66 | > * key 检查 67 | > * Required 68 | > * value 检查 69 | > * 常规 70 | > * In: 检查 value 是否在给的列表中,如:`"field": [In([1, 2, 3])]` 71 | > * Truthy: 检查 value 是否有值或为 True,如:`"field": [Truthy()]` 72 | > * Not: 检测 value 不在所给的条件中 73 | > * Equals: 检查 value 是否为某值,如:`Equals` 74 | > * 数字 75 | > * GreaterThan: 检查 value 是否 > 某数字,如:`"field": [GreaterThan(10)]` 76 | > * Range:检查 self.start <= value <= self.end,如:`"field": [Range(0, 10)]` 77 | > * 字符串 78 | > * Blank: 检查 value 是否为`''`,如:`"field": [Blank()]` 79 | > * Length:字符串长度是否在所给条件之间,如:`"field": [Length(0, maximum=5)]` 80 | > * Contains:字符串中是否有某个字符串,如:`"field": [Contains("test")]` 81 | > * 列表 82 | > * Contains:列表中是否有某个元素,如:`"field": [Contains(3)]` 83 | -------------------------------------------------------------------------------- /My_lib/w_mpms/demo.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | """ 3 | Do parallel python works easily in multithreads in multiprocesses 4 | 一个简单的多进程-多线程工作框架 5 | 6 | 工作模型: 7 | 主线程不断向队列中添加任务参数 8 | 外部进程的大量线程(工作函数)不断从任务队列中读取参数,并行执行后将结果加入到结果队列 9 | 主线程中新开一个处理线程,不断从结果队列读取并依次处理 10 | 11 | Due to many threads, some time-consuming tasks would finish much faster than single threads 12 | 可以显著提升某些长时间等待的工作的效率,如网络访问 13 | """ 14 | from __future__ import unicode_literals, print_function 15 | from time import time, sleep 16 | 17 | import w_lib.mpms 18 | 19 | def _worker(index, t=None): 20 | """ 21 | Worker function, accept task parameters and do actual work 22 | should be able to accept at least one arg 23 | ALWAYS works in external thread in external process 24 | 25 | 工作函数,接受任务参数,并进行实际的工作 26 | 总是工作在外部进程的线程中 (即不工作在主进程中) 
27 | """ 28 | sleep(0.2) # delay 0.2 second 29 | #print(index, t) 30 | 31 | # worker's return value will be added to product queue, waiting handler to handle 32 | # you can return any type here (Included the None , of course) 33 | # worker函数的返回值会被加入到队列中,供handler依次处理,返回值允许除了 StopIteration 以外的任何类型 34 | return index, "hello world" 35 | def main(): 36 | results = "" 37 | # we will run the benchmarks several times using the following params 38 | # 下面这些值用于多次运行,看时间 39 | # Init the poll # 初始化 40 | m = w_lib.mpms.MPMS(_worker,processes=1,threads=10) 41 | m.start() # start and fork subprocess 42 | start_time = time() # when we started # 记录开始时间 43 | 44 | # put task parameters into the task queue, 2000 total tasks 45 | # 把任务加入任务队列,一共2000次 46 | for i in range(200): 47 | m.put(i, t=time()) 48 | 49 | # close task queue and wait all workers and handler to finish 50 | # 等待全部任务及全部结果处理完成 51 | m.join() 52 | print(m.get_result()) 53 | 54 | # write and print records 55 | # 下面只是记录和打印结果 56 | results += " TotalTime: " + str(time() - start_time) 57 | print(results) 58 | 59 | if __name__ == '__main__': 60 | import sys, inspect 61 | if len(sys.argv) < 2: 62 | print("Usage:") 63 | for k, v in sorted(globals().items(), key=lambda item: item[0]): 64 | if inspect.isfunction(v) and k[0] != "_": 65 | args, __, __, defaults = inspect.getargspec(v) 66 | if defaults: 67 | print(sys.argv[0], k, str(args[:-len(defaults)])[1:-1].replace(",", ""), \ 68 | str(["%s=%s" % (a, b) for a, b in zip(args[-len(defaults):], defaults)])[1:-1].replace(",", "")) 69 | else: 70 | print(sys.argv[0], k, str(v.func_code.co_varnames[:v.func_code.co_argcount])[1:-1].replace(",", "")) 71 | sys.exit(-1) 72 | else: 73 | func = eval(sys.argv[1]) 74 | args = sys.argv[2:] 75 | try: 76 | r = func(*args) 77 | except Exception, e: 78 | print("Usage:") 79 | print("\t", "python %s" % sys.argv[1], str(func.func_code.co_varnames[:func.func_code.co_argcount])[1:-1].replace(",", "")) 80 | if func.func_doc: 81 | print("\n".join(["\t\t" + 
line.strip() for line in func.func_doc.strip().split("\n")])) 82 | print(e) 83 | r = -1 84 | import traceback 85 | traceback.print_exc() 86 | if isinstance(r, int): 87 | sys.exit(r) 88 | -------------------------------------------------------------------------------- /My_lib/w_mpms/readme.md: -------------------------------------------------------------------------------- 1 | # w_mpms 2 | 3 | 4 | * [1 简介](#1-简介) 5 | * [2 run](#2-run) 6 | * [version](#version) 7 | 8 | 9 | 10 | ## 1 简介 11 | Simple python Multiprocesses-Multithreads queue 12 | 简易 Python 多进程 - 多线程任务队列 13 | (自用,ap 不为生产环境下造成的任何损失和灵异现象负责) 14 | 15 | 在多个进程的多个线程的 worker 中完成耗时的任务,并在主进程的 collector 中处理结果 16 | 17 | 支持 python 2.7/3.4+ 18 | 19 | ## 2 run 20 | 21 | ```python 22 | from w_lib.mpms import MPMS 23 | import random 24 | import time 25 | 26 | def worker(i, j=None): 27 | time.sleep(3) 28 | return i,j 29 | 30 | def main(): 31 | m = MPMS( 32 | worker, 33 | processes=1, 34 | threads=100, # 每进程的线程数 35 | ) 36 | m.start() 37 | for i in range(200): # 你可以自行控制循环条件 38 | m.put(i, random.randint(0,99)) # 这里的参数列表就是 worker 接受的参数 39 | m.join() 40 | result = m.get_result() 41 | print result 42 | 43 | if __name__ == '__main__': 44 | main() 45 | ``` 46 | 更多请看 `demo.py` 47 | 48 | ### version 49 | 50 | > * V2.0.0.3 51 | > * (1) 日志中输出目前任务进度信息 52 | > * V2.0.0.2 53 | > * (1) 去掉了 collector 函数 54 | > * (2) 去掉了 Meta 类 55 | > * V2.0.0.1 56 | > * (1) 增加输出日志到 /tmp/mpms.log 57 | > * (2) 当进程数为 1 时,使用的队列自动修改为 Queue(当使用的 python 版本没有开启 sem_open 时使用,即无法使用多进程库 multiprocessing) 58 | > * V2.0.0.0 59 | > * [原程序 mpms](https://github.com/aploium/mpms) 60 | -------------------------------------------------------------------------------- /My_lib/w_mpms/w_lib/BLog.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | #coding=utf8 3 | """ 4 | # Author: meetbill 5 | # Created Time : 2016-08-01 10:59:26 6 | 7 | # File Name: blog.py 8 | # Description: 9 | 10 | """ 11 | import sys 12 | import 
logging 13 | from logging.handlers import RotatingFileHandler 14 | import os 15 | 16 | class ColoredFormatter(logging.Formatter): 17 | '''A colorful formatter.''' 18 | 19 | def __init__(self, fmt = None, datefmt = None): 20 | logging.Formatter.__init__(self, fmt, datefmt) 21 | # Color escape string 22 | COLOR_RED='\033[1;31m' 23 | COLOR_GREEN='\033[1;32m' 24 | COLOR_YELLOW='\033[1;33m' 25 | COLOR_BLUE='\033[1;34m' 26 | COLOR_PURPLE='\033[1;35m' 27 | COLOR_CYAN='\033[1;36m' 28 | COLOR_GRAY='\033[1;37m' 29 | COLOR_WHITE='\033[1;38m' 30 | COLOR_RESET='\033[1;0m' 31 | 32 | # Define log color 33 | self.LOG_COLORS = { 34 | 'DEBUG': '%s', 35 | 'INFO': COLOR_GREEN + '%s' + COLOR_RESET, 36 | 'WARNING': COLOR_YELLOW + '%s' + COLOR_RESET, 37 | 'ERROR': COLOR_RED + '%s' + COLOR_RESET, 38 | 'CRITICAL': COLOR_RED + '%s' + COLOR_RESET, 39 | 'EXCEPTION': COLOR_RED + '%s' + COLOR_RESET, 40 | } 41 | 42 | def format(self, record): 43 | level_name = record.levelname 44 | msg = logging.Formatter.format(self, record) 45 | 46 | return self.LOG_COLORS.get(level_name, '%s') % msg 47 | 48 | class Log(object): 49 | 50 | ''' 51 | log 52 | ''' 53 | def __init__(self, filename, level="debug", logid="meetbill", mbs=20, count=10, is_console=True): 54 | ''' 55 | mbs: how many MB 56 | count: the count of remain 57 | ''' 58 | try: 59 | self._level = level 60 | #print "init,level:",level,"\t","get_map_level:",self._level 61 | self._filename = filename 62 | self._logid = logid 63 | self._logger = logging.getLogger(self._logid) 64 | file_path = os.path.split(self._filename)[0] 65 | if not os.path.exists(file_path): 66 | os.makedirs(file_path) 67 | 68 | if not len(self._logger.handlers): 69 | self._logger.setLevel(self.get_map_level(self._level)) 70 | 71 | fmt = '[%(asctime)s] %(levelname)s %(message)s' 72 | datefmt = '%Y-%m-%d %H:%M:%S' 73 | formatter = logging.Formatter(fmt, datefmt) 74 | 75 | maxBytes = int(mbs) * 1024 * 1024 76 | file_handler = RotatingFileHandler(self._filename, 
mode='a',maxBytes=maxBytes,backupCount=count) 77 | self._logger.setLevel(self.get_map_level(self._level)) 78 | file_handler.setFormatter(formatter) 79 | self._logger.addHandler(file_handler) 80 | 81 | if is_console == True: 82 | stream_handler = logging.StreamHandler(sys.stderr) 83 | console_formatter = ColoredFormatter(fmt, datefmt) 84 | stream_handler.setFormatter(console_formatter) 85 | self._logger.addHandler(stream_handler) 86 | 87 | except Exception as expt: 88 | print expt 89 | 90 | def tolog(self, msg, level=None): 91 | try: 92 | level = level if level else self._level 93 | level = str(level).lower() 94 | level = self.get_map_level(level) 95 | if level == logging.DEBUG: 96 | self._logger.debug(msg) 97 | if level == logging.INFO: 98 | self._logger.info(msg) 99 | if level == logging.WARN: 100 | self._logger.warn(msg) 101 | if level == logging.ERROR: 102 | self._logger.error(msg) 103 | if level == logging.CRITICAL: 104 | self._logger.critical(msg) 105 | except Exception as expt: 106 | print expt 107 | 108 | def debug(self,msg): 109 | self.tolog(msg, level="debug") 110 | 111 | def info(self,msg): 112 | self.tolog(msg, level="info") 113 | 114 | def warn(self,msg): 115 | self.tolog(msg, level="warn") 116 | 117 | def error(self,msg): 118 | self.tolog(msg, level="error") 119 | 120 | def critical(self,msg): 121 | self.tolog(msg, level="critical") 122 | 123 | def get_map_level(self,level="debug"): 124 | level = str(level).lower() 125 | #print "get_map_level:",level 126 | if level == "debug": 127 | return logging.DEBUG 128 | if level == "info": 129 | return logging.INFO 130 | if level == "warn": 131 | return logging.WARN 132 | if level == "error": 133 | return logging.ERROR 134 | if level == "critical": 135 | return logging.CRITICAL 136 | 137 | 138 | if __name__ == "__main__": 139 | debug=True 140 | logpath = "/tmp/test.log" 141 | logger = Log(logpath,level="debug",logid="meetbill",is_console=debug, mbs=5, count=5) 142 | 143 | logstr="helloworld" 144 | 
logger.error(logstr) 145 | logger.info(logstr) 146 | logger.warn(logstr) 147 | -------------------------------------------------------------------------------- /My_lib/w_mpms/w_lib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/meetbill/MyPythonLib/fd58b8a112ad366fba67771cafd752f0b8f378ee/My_lib/w_mpms/w_lib/__init__.py -------------------------------------------------------------------------------- /My_lib/xmltodict/README.md: -------------------------------------------------------------------------------- 1 | ## xmltodict 2 | 3 | * [使用](#使用) 4 | * [xml 转 dict](#xml-转-dict) 5 | * [方法](#方法) 6 | * [字典和 json 的区别](#字典和-json-的区别) 7 | * [dict 转 xml](#dict-转-xml) 8 | * [范例](#范例) 9 | * [FAQ 及需注意内容](#faq-及需注意内容) 10 | * [xml 转 dict 时报错](#xml-转-dict-时报错) 11 | * [xml 转 dict 时注意](#xml-转-dict-时注意) 12 | 13 | 14 | # 使用 15 | 16 | ## xml 转 dict 17 | 18 | ### 方法 19 | 20 | parse 21 | 22 | **注**:输入值为 xml 字符串,需要去掉 xml 声明 (), 如果网页获取的 xml 含有声明可以用下面方法去掉声明 23 | 24 | ``` 25 | new_xml = old_xml.split('?>')[1] 26 | ``` 27 | 28 | ### 字典和 json 的区别 29 | 30 | 在 python 中,字典的输出内容跟 json 格式内容一样,但是字典的格式是字典,json 的格式是字符串,所以在传输的时候(特别是网页)要转换使用 31 | 32 | * 编码:把一个 Python 对象编码转换成 Json 字符串 ---json.dumps() 33 | * 解码:把 Json 格式字符串解码转换成 Python 对象 ---json.loads() 34 | 35 | ## dict 转 xml 36 | 37 | unparse 38 | 39 | ### 范例 40 | 41 | ``` 42 | import xmltodict 43 | 44 | 45 | try: # pragma no cover 46 | from collections import OrderedDict 47 | except ImportError: # pragma no cover 48 | try: 49 | from ordereddict import OrderedDict 50 | except ImportError: 51 | OrderedDict = dict 52 | 53 | data = OrderedDict() 54 | data["mydocument"] = OrderedDict() 55 | data["mydocument"]["@has"] = "an attribute" 56 | data["mydocument"]["and"] = OrderedDict() 57 | data["mydocument"]["and"]["many"] = [] 58 | data["mydocument"]["and"]["many"].append("elements") 59 | data["mydocument"]["and"]["many"].append("more elements") 60 | data["mydocument"]["plus"] = 
OrderedDict() 61 | data["mydocument"]["plus"]["@a"] = "complex" 62 | data["mydocument"]["plus"]["#text"] = "elements as well" 63 | 64 | print xmltodict.unparse(data,pretty=True) 65 | 66 | ``` 67 | 68 | # FAQ 及需注意内容 69 | 70 | ## xml 转 dict 时报错 71 | 72 | 报错如下 73 | 74 | ``` 75 | xml.parsers.expat.ExpatError: not well-formed (invalid token): line 11, column 47 76 | ``` 77 | 上面提示是在 xml 的第 11 行的第 47 个字符那有问题,查看的时候查看下上行是否有错误 78 | 79 | 检查下是否双引号使用了中文,xml 格式错误等等 80 | 81 | ## xml 转 dict 时注意 82 | 83 | 我在本地的操作如下 84 | 85 | ``` 86 | A 机器通过 dict 生成 xml,发送给机器 B 87 | B 机器接受到 xml 后转为 dict 88 | ``` 89 | 其实这个时候,A 的 dict 和 B 转后的 dict 是可能不一样的 90 | 91 | 例如下面场景 92 | ``` 93 | "ROW":[ 94 | { 95 | "STATUS": 2, 96 | "MSG": "ERR" 97 | } 98 | ] 99 | 100 | ``` 101 | 原字典中的 value 是个列表,但是列表中只有一项,转为 xml 然后转为 dict 时就会变为如下结果 102 | ``` 103 | "ROW":{ 104 | "STATUS": 2, 105 | "MSG": "ERR" 106 | } 107 | ``` 108 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MyPythonLib 2 | 3 | 4 | * [1 My_lib](#1-my_lib) 5 | * [1.1 终端界面 / 菜单相关](#11-终端界面--菜单相关) 6 | * [1.2 进程相关](#12-进程相关) 7 | * [1.3 配置文件及日志相关](#13-配置文件及日志相关) 8 | * [1.4 其他](#14-其他) 9 | * [1.5 Debug](#15-debug) 10 | * [1.6 DB/Redis](#16-dbredis) 11 | * [2 Example](#2-example) 12 | * [2.1 教程](#21-教程) 13 | * [2.2 相关笔记](#22-相关笔记) 14 | * [3 相关项目](#3-相关项目) 15 | * [4 参加步骤](#4-参加步骤) 16 | 17 | 18 | 19 | ## 1 My_lib 20 | 21 | ### 1.1 终端界面 / 菜单相关 22 | 23 | * Python 程序进度条 ---------------------------[progressbar](My_lib/progressbar) 24 | * Linux 终端表格 ------------------------------[ttable](My_lib/ttable) 25 | * 命令行执行加函数参数时,可以直接对函数操作 --[command](My_lib/command_utils/) 26 | * 终端 print 颜色 -----------------------------[color](My_lib/color/) 27 | 28 | ### 1.2 进程相关 29 | 30 | * 守护进程模板 --------------------------------[daemon](My_lib/daemon) 31 | * 检测某进程是否存在 --------------------------[monitor_process](My_lib/monitor_process) 32 | * Python 多进程 - 
多线程任务队列 --------------[mpmt](https://github.com/meetbill/mpmt) 33 | * Python 调用 shell 库 ------------------------[easyrun](My_lib/easyrun/README.md) 34 | 35 | ### 1.3 配置文件及日志相关 36 | 37 | * 查看 Linux 信息和修改 Linux 配置文件 --------[serverinfo_config](My_lib/serverinfo_config) 38 | * 根据日志大小轮转日志 ------------------------[log](My_lib/log_utils/) 39 | * 对配置文件进行获取配置以及修改配置 ----------[file_utils](My_lib/file_utils/) 40 | * 参数检查 41 | * 函数参数检查 ------------------------------[schema](My_lib/schema) 42 | * 将 xml 转为字典或者字典转为 xml -------------[xmltodict](./My_lib/xmltodict/) 43 | 44 | ### 1.4 其他 45 | 46 | * 根据 IP 获取地址位置信息 --------------------[query_ip](My_lib/query_ip/) 47 | 48 | ### 1.5 Debug 49 | 50 | * 极简 DeBug 工具 PySnooper -------------------[PySnooper](https://github.com/cool-RR/PySnooper) 51 | 52 | ### 1.6 DB/Redis 53 | 54 | * 轻量化 MySQL orm ----------------------------[Peewee](My_lib/peewee) 55 | * Python 连接 MySQL 的库 PyMySQL --------------[PyMySQL](My_lib/pymysql) 56 | * 分析 Redis RDB 工具 -------------------------[redis-rdb-tools](My_lib/redis-rdb-tools) 57 | 58 | ## 2 Example 59 | 60 | ### 2.1 教程 61 | 62 | * [微软 Python 教程](https://github.com/microsoft/c9-python-getting-started) 63 | 64 | ### 2.2 相关笔记 65 | 66 | * [相关 wiki](https://github.com/meetbill/MyPythonLib/wiki) 67 | * [Python 基础学习](./Example/python_base/README.md) 68 | * [Python 交互模式自动补全](./Example/python_interactive/README.md) 69 | * [Python 统计文件夹中代码行数](./Example/python_count) 70 | * [Pytest](./My_lib/pytest) 71 | 72 | ## 3 相关项目 73 | 74 | > * [运维实践指南相关程序](https://github.com/meetbill/op_practice_code) 75 | > * [Linux 运维工具](https://github.com/meetbill/linux_tools) 76 | > * Python 常用库集合[星图](https://github.com/meetbill/x-lib) 77 | 78 | ## 4 参加步骤 79 | 80 | * 在 GitHub 上 `fork` 到自己的仓库,然后 `clone` 到本地,并设置用户信息。 81 | ``` 82 | $ git clone https://github.com/meetbill/MyPythonLib.git 83 | $ cd MyPythonLib 84 | $ git config user.name "yourname" 85 | $ git config user.email "your email" 86 | ``` 87 | * 修改代码后提交,并推送到自己的仓库。 88 | ``` 89 | $ #do 
some change on the content 90 | $ git commit -am "Fix issue #1: change helo to hello" 91 | $ git push 92 | ``` 93 | * 在 GitHub 网站上提交 pull request。 94 | * 定期使用项目仓库内容更新自己仓库内容。 95 | ``` 96 | $ git remote add upstream https://github.com/meetbill/MyPythonLib.git 97 | $ git fetch upstream 98 | $ git checkout master 99 | $ git rebase upstream/master 100 | $ git push -f origin master 101 | ``` 102 | --------------------------------------------------------------------------------