├── .gitignore ├── 115.py ├── 91porn.py ├── LICENSE ├── README.md ├── ThunderLixianExporter.user.js ├── bt.py ├── ed2k_search.py ├── flv_cmd.py ├── leetcode_problems.py ├── music.163.com.py ├── music.baidu.com.py ├── pan.baidu.com.py ├── tumblr.py ├── unzip.py ├── xiami.py └── yunpan.360.cn.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | bin/ 12 | build/ 13 | develop-eggs/ 14 | dist/ 15 | eggs/ 16 | lib/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | *.egg-info/ 22 | .installed.cfg 23 | *.egg 24 | 25 | # Installer logs 26 | pip-log.txt 27 | pip-delete-this-directory.txt 28 | 29 | # Unit test / coverage reports 30 | htmlcov/ 31 | .tox/ 32 | .coverage 33 | .cache 34 | nosetests.xml 35 | coverage.xml 36 | 37 | # Translations 38 | *.mo 39 | 40 | # Mr Developer 41 | .mr.developer.cfg 42 | .project 43 | .pydevproject 44 | 45 | # Rope 46 | .ropeproject 47 | 48 | # Django stuff: 49 | *.log 50 | *.pot 51 | 52 | # Sphinx documentation 53 | docs/_build/ 54 | 55 | -------------------------------------------------------------------------------- /115.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # vim: set fileencoding=utf8 3 | 4 | import os 5 | import sys 6 | from getpass import getpass 7 | import requests 8 | import urllib 9 | import json 10 | import re 11 | import time 12 | import argparse 13 | import random 14 | import sha 15 | import select 16 | 17 | ############################################################ 18 | # wget exit status 19 | wget_es = { 20 | 0: "No problems occurred.", 21 | 2: "User interference.", 22 | 1<<8: "Generic error code.", 23 | 2<<8: "Parse error - for instance, when parsing command-line " \ 24 | "optio.wgetrc or .netrc...", 25 | 3<<8: "File I/O error.", 26 | 4<<8: 
"Network failure.", 27 | 5<<8: "SSL verification failure.", 28 | 6<<8: "Username/password authentication failure.", 29 | 7<<8: "Protocol errors.", 30 | 8<<8: "Server issued an error response." 31 | } 32 | ############################################################ 33 | 34 | # file extensions 35 | mediatype = [ 36 | ".wma", ".wav", ".mp3", ".aac", ".ra", ".ram", ".mp2", ".ogg", ".aif", 37 | ".mpega", ".amr", ".mid", ".midi", ".m4a", ".m4v", ".wmv", ".rmvb", 38 | ".mpeg4", ".mpeg2", ".flv", ".avi", ".3gp", ".mpga", ".qt", ".rm", 39 | ".wmz", ".wmd", ".wvx", ".wmx", ".wm", ".swf", ".mpg", ".mp4", ".mkv", 40 | ".mpeg", ".mov", ".mdf", ".iso", ".asf" 41 | ] 42 | 43 | s = '\x1b[%d;%dm%s\x1b[0m' # terminual color template 44 | 45 | cookie_file = os.path.join(os.path.expanduser('~'), '.115.cookies') 46 | 47 | headers = { 48 | "Accept":"Accept: application/json, text/javascript, */*; q=0.01", 49 | "Accept-Encoding":"text/html", 50 | "Accept-Language":"en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2", 51 | "Content-Type":"application/x-www-form-urlencoded; charset=UTF-8", 52 | "Referer":"http://m.115.com/", 53 | "X-Requested-With": "XMLHttpRequest", 54 | "User-Agent":"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 "\ 55 | "(KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36" 56 | } 57 | 58 | ss = requests.session() 59 | ss.headers.update(headers) 60 | 61 | class pan115(object): 62 | def __init__(self): 63 | self.download = self.play if args.play else self.download 64 | 65 | def init(self): 66 | if os.path.exists(cookie_file): 67 | try: 68 | t = json.loads(open(cookie_file).read()) 69 | ss.cookies.update(t.get('cookies', t)) 70 | if not self.check_login(): 71 | print s % (1, 91, ' !! cookie is invalid, please login\n') 72 | sys.exit(1) 73 | self.check_vip() 74 | except: 75 | g = open(cookie_file, 'w') 76 | g.close() 77 | print s % (1, 97, ' please login') 78 | sys.exit(1) 79 | else: 80 | print s % (1, 91, ' !! 
cookie_file is missing, please login') 81 | sys.exit(1) 82 | 83 | def check_vip(self): 84 | url = 'http://vip.115.com/?ac=mycouponcount' 85 | r = ss.get(url).content 86 | 87 | if '"vip":0' in r: 88 | self.is_vip = False 89 | else: 90 | self.is_vip = True 91 | 92 | def check_login(self): 93 | #print s % (1, 97, '\n -- check_login') 94 | url = 'http://msg.115.com/?ac=unread' 95 | j = ss.get(url) 96 | if '"code"' not in j.text: 97 | #print s % (1, 92, ' -- check_login success\n') 98 | self.save_cookies() 99 | return True 100 | else: 101 | print s % (1, 91, ' -- check_login fail\n') 102 | return False 103 | 104 | def login(self, account, password): 105 | print s % (1, 97, '\n -- login') 106 | 107 | def get_ssopw(ssoext): 108 | p = sha.new(password).hexdigest() 109 | a = sha.new(account).hexdigest() 110 | t = sha.new(p + a).hexdigest() 111 | ssopw = sha.new(t + ssoext.upper()).hexdigest() 112 | return ssopw 113 | 114 | ssoext = str(int(time.time()*1000)) 115 | ssopw = get_ssopw(ssoext) 116 | 117 | quote = urllib.quote 118 | data = quote("login[ssoent]")+"=B1&" + \ 119 | quote("login[version]")+"=2.0&" + \ 120 | quote("login[ssoext]")+"=%s&" % ssoext + \ 121 | quote("login[ssoln]")+"=%s&" % quote(account) + \ 122 | quote("login[ssopw]")+"=%s&" % ssopw + \ 123 | quote("login[ssovcode]")+"=%s&" % ssoext + \ 124 | quote("login[safe]")+"=1&" + \ 125 | quote("login[time]")+"=1&" + \ 126 | quote("login[safe_login]")+"=1&" + \ 127 | "goto=http://m.115.com/?ac=home" 128 | 129 | theaders = headers 130 | theaders["Referer"] = "http://passport.115.com\ 131 | /static/reg_login_130418/bridge.html?ajax_cb_key=bridge_%s" \ 132 | % int(time.time()*1000) 133 | 134 | # Post! 
135 | # XXX : do not handle errors 136 | params = { 137 | 'ct': 'login', 138 | 'ac': 'ajax', 139 | 'is_ssl': 1 140 | } 141 | url = 'http://passport.115.com' 142 | ss.post(url, params=params, data=data, headers=theaders) 143 | self.save_cookies() 144 | 145 | def save_cookies(self): 146 | with open(cookie_file, 'w') as g: 147 | c = {'cookies': ss.cookies.get_dict()} 148 | g.write(json.dumps(c, indent=4, sort_keys=True)) 149 | 150 | def get_dlink(self, pc): 151 | params = { 152 | "pickcode": pc.encode('utf8'), 153 | "_": int(time.time()*1000), 154 | } 155 | url = 'http://web.api.115.com/files/download' 156 | r = ss.get(url, params=params) 157 | j = r.json() 158 | dlink = j['file_url'].encode('utf8') 159 | return dlink 160 | 161 | def _get_play_purl(self, pickcode): 162 | url = 'http://115.com/api/video/m3u8/%s.m3u8' % pickcode 163 | r = ss.get(url) 164 | c = r.content.strip() 165 | 166 | if c: 167 | purl = c.split()[-1] 168 | if 'http' not in purl: 169 | return None 170 | else: 171 | return purl 172 | else: 173 | return None 174 | 175 | def get_infos(self, cid): 176 | params = { 177 | "cid": cid, 178 | "offset": 0, 179 | "type": "", 180 | "limit": 10000, 181 | "format": "json", 182 | "aid": 1, 183 | "o": "file_name", 184 | "asc": 0, 185 | "show_dir": 1 186 | } 187 | 188 | url = 'http://web.api.115.com/files' 189 | j = ss.get(url, params=params).json() 190 | 191 | dir_loop1 = [{'dir': j['path'][-1]['name'], 'cid': j['cid']}] 192 | dir_loop2 = [] 193 | #base_dir = os.getcwd() 194 | while dir_loop1: 195 | for d in dir_loop1: 196 | params['cid'] = d['cid'] 197 | j = ss.get(url, params=params).json() 198 | if j['errNo'] == 0 and j['data']: 199 | if args.type_: 200 | j['data'] = [ 201 | x for x in j['data'] \ 202 | if x.get('ns') \ 203 | or x['ico'].lower() == unicode(args.type_.lower()) 204 | ] 205 | 206 | for i in j['data']: 207 | if i.get('ns'): 208 | item = { 209 | 'dir': os.path.join(d['dir'], i['ns']), 210 | 'cid': i['cid'] 211 | } 212 | dir_loop2.append(item) 213 | 
214 | if args.play: 215 | j['data'] = [ 216 | i for i in j['data'] \ 217 | if i.get('sha') \ 218 | and os.path.splitext(i['n'])[-1].lower() \ 219 | in mediatype 220 | ] 221 | 222 | total_file = len([i for i in j['data'] if not i.get('ns')]) 223 | if args.from_ - 1: 224 | j['data'] = j['data'][args.from_-1:] if args.from_ \ 225 | else j['data'] 226 | nn = args.from_ 227 | for i in j['data']: 228 | if not i.get('ns'): 229 | t = i['n'] 230 | t = os.path.join(d['dir'], t).encode('utf8') 231 | t = os.path.join(os.getcwd(), t) 232 | infos = { 233 | 'file': t, 234 | 'dir_': os.path.split(t)[0], 235 | 'dlink': self.get_dlink(i['pc']), 236 | 'name': i['n'].encode('utf8'), 237 | #'purl': self._get_play_purl( 238 | # i['pc'].encode('utf8')) \ 239 | # if args.play and self.is_vip else None, 240 | 'purl': self._get_play_purl( 241 | i['pc'].encode('utf8')) \ 242 | if args.play else None, 243 | 'nn': nn, 244 | 'total_file': total_file 245 | } 246 | nn += 1 247 | self.download(infos) 248 | else: 249 | print s % (1, 91, ' error: get_infos') 250 | sys.exit(0) 251 | dir_loop1 = dir_loop2 252 | dir_loop2 = [] 253 | 254 | 255 | @staticmethod 256 | def download(infos): 257 | ## make dirs 258 | if not os.path.exists(infos['dir_']): 259 | os.makedirs(infos['dir_']) 260 | else: 261 | if os.path.exists(infos['file']): 262 | return 0 263 | 264 | num = random.randint(0, 7) % 8 265 | col = s % (2, num + 90, infos['file']) 266 | infos['nn'] = infos['nn'] if infos.get('nn') else 1 267 | infos['total_file'] = infos['total_file'] \ 268 | if infos.get('total_file') else 1 269 | print '\n ++ 正在下载: #', \ 270 | s % (1, 97, infos['nn']), \ 271 | '/', s % (1, 97, infos['total_file']), \ 272 | '#', col 273 | 274 | if args.aria2c: 275 | # 115 普通用户只能有4下载通道。 276 | quiet = ' --quiet=true' if args.quiet else '' 277 | taria2c = ' -x %s -s %s' % (args.aria2c, args.aria2c) 278 | tlimit = ' --max-download-limit %s' \ 279 | % args.limit if args.limit else '' 280 | cmd = 'aria2c -c%s%s%s ' \ 281 | '-m 0 ' \ 282 | 
'-o "%s.tmp" -d "%s" ' \ 283 | '--user-agent "%s" ' \ 284 | '--header "Referer:http://m.115.com/" "%s"' \ 285 | % (quiet, taria2c, tlimit, infos['name'], infos['dir_'],\ 286 | headers['User-Agent'], infos['dlink']) 287 | else: 288 | tlimit = ' --limit-rate %s' % args.limit if args.limit else '' 289 | cmd = 'wget -c%s ' \ 290 | '-O "%s.tmp" --user-agent "%s" ' \ 291 | '--header "Referer:http://m.115.com/" "%s"' \ 292 | % (tlimit, infos['file'], headers['User-Agent'], 293 | infos['dlink']) 294 | 295 | status = os.system(cmd) 296 | if status != 0: # other http-errors, such as 302. 297 | wget_exit_status_info = wget_es[status] 298 | print('\n\n ---### \x1b[1;91mERROR\x1b[0m ==> '\ 299 | '\x1b[1;91m%d (%s)\x1b[0m ###--- \n\n' \ 300 | % (status, wget_exit_status_info)) 301 | print s % (1, 91, ' ===> '), cmd 302 | sys.exit(1) 303 | else: 304 | os.rename('%s.tmp' % infos['file'], infos['file']) 305 | 306 | @staticmethod 307 | def play(infos): 308 | num = random.randint(0, 7) % 8 309 | col = s % (2, num + 90, infos['name']) 310 | infos['nn'] = infos['nn'] if infos.get('nn') else 1 311 | infos['total_file'] = infos['total_file'] \ 312 | if infos.get('total_file') else 1 313 | print '\n ++ play: #', \ 314 | s % (1, 97, infos['nn']), '/', \ 315 | s % (1, 97, infos['total_file']), \ 316 | '#', col 317 | 318 | if not infos['purl']: 319 | print s % (1, 91, ' |-- m3u8 is not ready, using dlink') 320 | infos['purl'] = infos['dlink'] 321 | 322 | cmd = 'mpv --really-quiet --cache 8140 --cache-default 8140 ' \ 323 | '--http-header-fields "user-agent:%s" '\ 324 | '--http-header-fields "Referer:http://m.115.com" "%s"' \ 325 | % (headers['User-Agent'], infos['purl']) 326 | 327 | status = os.system(cmd) 328 | timeout = 1 329 | ii, _, _ = select.select([sys.stdin], [], [], timeout) 330 | if ii: 331 | sys.exit(0) 332 | else: 333 | pass 334 | 335 | # TODO 336 | def exists(self, filepath): 337 | pass 338 | 339 | # TODO 340 | def upload(self, path, dir_): 341 | pass 342 | 343 | def 
addtask(self, u): 344 | # get uid 345 | url = 'http://my.115.com/?ct=ajax&ac=get_user_aq' 346 | r = ss.get(url) 347 | j = r.json() 348 | uid = j['data']['uid'] 349 | 350 | # get sign, time 351 | url = 'http://115.com/?ct=offline&ac=space' 352 | r = ss.get(url) 353 | j = r.json() 354 | sign = j['sign'] 355 | tm = j['time'] 356 | 357 | # now, add task 358 | data = { 359 | 'url': urllib.quote_plus(u), 360 | 'uid': uid, 361 | 'sign': sign, 362 | 'time': str(tm) 363 | } 364 | url = 'http://115.com/lixian/?ct=lixian&ac=add_task_url' 365 | r = ss.post(url, data=data) 366 | if not r.ok: 367 | print s % (1, 91, ' !! Error at addtask') 368 | print r.content 369 | sys.exit(1) 370 | 371 | j = r.json() 372 | if j['info_hash']: 373 | print s % (1, 92, ' ++ add task success.') 374 | else: 375 | print s % (2, 91, ' !! Error: %s' % j['error_msg']) 376 | sys.exit() 377 | 378 | data = { 379 | 'page': 1, 380 | 'uid': uid, 381 | 'sign': sign, 382 | 'time': str(tm) 383 | } 384 | url = 'http://115.com/lixian/?ct=lixian&ac=task_lists' 385 | r = ss.post(url, data=data) 386 | j = r.json() 387 | percentDone = j['tasks'][0]['percentDone'] 388 | print s % (1, 97, ' ++ %s' % j['tasks'][0]['name']) 389 | print s % (1, 92, ' %s%s Done' % (percentDone, '%')) 390 | 391 | def do(self, pc): 392 | dlink = self.get_dlink(pc) 393 | name = re.search(r'/([^/]+?)\?', dlink).group(1) 394 | name = urllib.unquote_plus(name) 395 | t = os.path.join(os.getcwd(), name) 396 | infos = { 397 | 'file': t, 398 | 'dir_': os.path.split(t)[0], 399 | 'dlink': dlink, 400 | #'purl': self._get_play_purl(pc) \ 401 | # if args.play and self.is_vip else None, 402 | 'purl': self._get_play_purl(pc) if args.play else None, 403 | 'name': name, 404 | 'nn': 1, 405 | 'total_file': 1 406 | } 407 | self.download(infos) 408 | 409 | def main(argv): 410 | if len(argv) <= 1: 411 | sys.exit() 412 | 413 | ###################################################### 414 | # for argparse 415 | p = argparse.ArgumentParser( 416 | description='download 
from 115.com reversely') 417 | p.add_argument('xxx', type=str, nargs='*', \ 418 | help='命令对象.') 419 | p.add_argument('-a', '--aria2c', action='store', default=None, \ 420 | type=int, help='aria2c分段下载数量') 421 | p.add_argument('-p', '--play', action='store_true', \ 422 | help='play with mpv') 423 | p.add_argument('-q', '--quiet', action='store_true', \ 424 | help='quiet for download and play') 425 | p.add_argument('-f', '--from_', action='store', \ 426 | default=1, type=int, \ 427 | help='从第几个开始下载,eg: -f 42') 428 | p.add_argument('-t', '--type_', action='store', \ 429 | default=None, type=str, \ 430 | help='要下载的文件的后缀,eg: -t mp3') 431 | p.add_argument('-l', '--limit', action='store', \ 432 | default=None, type=str, help='下载速度限制,eg: -l 100k') 433 | p.add_argument('-d', '--addtask', action='store_true', \ 434 | help='加离线下载任务') 435 | global args 436 | args = p.parse_args(argv[1:]) 437 | xxx = args.xxx 438 | 439 | if xxx[0] == 'login' or xxx[0] == 'g': 440 | if len(xxx[1:]) < 1: 441 | account = raw_input(s % (1, 97, ' account: ')) 442 | password = getpass(s % (1, 97, 'password: ')) 443 | elif len(xxx[1:]) == 1: 444 | account = xxx[1] 445 | password = getpass(s % (1, 97, ' password: ')) 446 | elif len(xxx[1:]) == 2: 447 | account = xxx[1] 448 | password = xxx[2] 449 | else: 450 | print s % (1, 91, ' login\n login account\n \ 451 | login account password') 452 | 453 | x = pan115() 454 | x.login(account, password) 455 | is_signin = x.check_login() 456 | if is_signin: 457 | print s % (1, 92, ' ++ login succeeds.') 458 | else: 459 | print s % (1, 91, ' login failes') 460 | 461 | elif xxx[0] == 'signout': 462 | g = open(cookie_file, 'w') 463 | g.close() 464 | 465 | else: 466 | x = pan115() 467 | x.init() 468 | for url in xxx: 469 | if 'pickcode' in url: 470 | pc = re.search(r'pickcode=([\d\w]+)', url) 471 | if pc: 472 | pc = pc.group(1) 473 | x.do(pc) 474 | else: 475 | print s % (1, 91, ' can\'t find pickcode.') 476 | elif 'cid=' in url: 477 | cid = re.search(r'cid=(\d+)', url) 
478 | cid = cid.group(1) if cid else '0' 479 | x.get_infos(cid) 480 | elif args.addtask: 481 | x.addtask(url) 482 | else: 483 | print s % (2, 91, ' 请正确输入自己的115地址。') 484 | 485 | if __name__ == '__main__': 486 | argv = sys.argv 487 | main(argv) 488 | -------------------------------------------------------------------------------- /91porn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # vim: set fileencoding=utf8 3 | 4 | import os 5 | import sys 6 | import requests 7 | import urlparse 8 | import re 9 | import argparse 10 | import random 11 | import select 12 | import urllib2 13 | 14 | ############################################################ 15 | # wget exit status 16 | wget_es = { 17 | 0: "No problems occurred.", 18 | 2: "User interference.", 19 | 1<<8: "Generic error code.", 20 | 2<<8: "Parse error - for instance, when parsing command-line " \ 21 | "optio.wgetrc or .netrc...", 22 | 3<<8: "File I/O error.", 23 | 4<<8: "Network failure.", 24 | 5<<8: "SSL verification failure.", 25 | 6<<8: "Username/password authentication failure.", 26 | 7<<8: "Protocol errors.", 27 | 8<<8: "Server issued an error response." 
28 | } 29 | ############################################################ 30 | 31 | s = '\x1b[%d;%dm%s\x1b[0m' # terminual color template 32 | 33 | headers = { 34 | "Accept":"text/html,application/xhtml+xml,application/xml; " \ 35 | "q=0.9,image/webp,*/*;q=0.8", 36 | "Accept-Encoding":"text/html", 37 | "Accept-Language":"en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2", 38 | "Content-Type":"application/x-www-form-urlencoded", 39 | "User-Agent":"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 " \ 40 | "(KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36" 41 | } 42 | 43 | ss = requests.session() 44 | ss.headers.update(headers) 45 | 46 | class nrop19(object): 47 | def __init__(self, url=None): 48 | self.url = url 49 | self.download = self.play if args.play else self.download 50 | 51 | def get_infos(self): 52 | r = ss.get(self.url) 53 | if r.ok: 54 | n1 = re.search(r'so.addVariable\(\'file\',\'(\d+)\'', r.content) 55 | n2 = re.search(r'so.addVariable\(\'seccode\',\'(.+?)\'', r.content) 56 | n3 = re.search(r'so.addVariable\(\'max_vid\',\'(\d+)\'', r.content) 57 | 58 | if n1 and n2 and n3: 59 | apiurl = 'http://%s/getfile.php' \ 60 | % urlparse.urlparse(self.url).hostname 61 | 62 | params = { 63 | 'VID': n1.group(1), 64 | 'mp4': '1', 65 | 'seccode': n2.group(1), 66 | 'max_vid': n3.group(1), 67 | } 68 | 69 | #tapiurl = apiurl + '?' 
+ \ 70 | #'&'.join(['='.join(item) for item in params.items()]) 71 | #print tapiurl 72 | 73 | r = requests.get(apiurl, params=params) 74 | if r.ok: 75 | dlink = re.search( 76 | r'file=(http.+?)&', r.content).group(1) 77 | dlink = urllib2.unquote(dlink) 78 | name = re.search( 79 | r'viewkey=([\d\w]+)', self.url).group(1) 80 | infos = { 81 | 'name': '%s.mp4' % name, 82 | 'file': os.path.join(os.getcwd(), '%s.mp4' % name), 83 | 'dir_': os.getcwd(), 84 | 'dlink': dlink, 85 | } 86 | if not args.get_url: 87 | self.download(infos) 88 | else: 89 | print dlink 90 | else: 91 | print s % (1, 91, ' Error at get(apiurl)') 92 | else: 93 | print s % (1, 91, ' You are blocked') 94 | 95 | def download(self, infos): 96 | num = random.randint(0, 7) % 7 97 | col = s % (2, num + 90, infos['file']) 98 | print '\n ++ 正在下载: %s' % col 99 | 100 | cookies = '; '.join( 101 | ['%s=%s' % (i, ii) for i, ii in ss.cookies.items()]) 102 | if args.aria2c: 103 | cmd = 'aria2c -c -x10 -s10 ' \ 104 | '-o "%s.tmp" -d "%s" --header "User-Agent: %s" ' \ 105 | '--header "Cookie: %s" "%s"' \ 106 | % (infos['name'], infos['dir_'], \ 107 | headers['User-Agent'], cookies, infos['dlink']) 108 | else: 109 | cmd = 'wget -c -O "%s.tmp" --header "User-Agent: %s" ' \ 110 | '--header "Cookie: %s" "%s"' \ 111 | % (infos['file'], headers['User-Agent'], cookies, infos['dlink']) 112 | 113 | status = os.system(cmd) 114 | if status != 0: # other http-errors, such as 302. 
115 | wget_exit_status_info = wget_es[status] 116 | print('\n\n ----### \x1b[1;91mERROR\x1b[0m ==> '\ 117 | '\x1b[1;91m%d (%s)\x1b[0m ###--- \n\n' \ 118 | % (status, wget_exit_status_info)) 119 | print s % (1, 91, ' ===> '), cmd 120 | sys.exit(1) 121 | else: 122 | os.rename('%s.tmp' % infos['file'], infos['file']) 123 | 124 | def play(self, infos): 125 | num = random.randint(0, 7) % 7 126 | col = s % (2, num + 90, infos['name']) 127 | print '\n ++ play: %s' % col 128 | 129 | cmd = 'mpv --really-quiet --cache 8140 --cache-default 8140 ' \ 130 | '--http-header-fields "user-agent:%s" "%s"' \ 131 | % (headers['User-Agent'], infos['dlink']) 132 | 133 | os.system(cmd) 134 | timeout = 1 135 | ii, _, _ = select.select([sys.stdin], [], [], timeout) 136 | if ii: 137 | sys.exit(0) 138 | else: 139 | pass 140 | 141 | def do(self): 142 | self.get_infos() 143 | 144 | def main(url): 145 | if args.proxy: 146 | ss.proxies = { 147 | 'http': args.proxy, 148 | 'https': args.proxy 149 | } 150 | x = nrop19(url) 151 | x.do() 152 | 153 | if __name__ == '__main__': 154 | p = argparse.ArgumentParser( 155 | description='download from 91porn.com') 156 | p.add_argument('url', help='url of 91porn.com') 157 | p.add_argument('-a', '--aria2c', action='store_true', \ 158 | help='download with aria2c') 159 | p.add_argument('-p', '--play', action='store_true', \ 160 | help='play with mpv') 161 | p.add_argument('-u', '--get_url', action='store_true', \ 162 | help='print download_url without download') 163 | p.add_argument('--proxy', action='store', type=str, default=None, \ 164 | help='print download_url without download') 165 | args = p.parse_args() 166 | main(args.url) 167 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2014 PeterDing 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this 
software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # iScript 2 | 3 | ## pan.baidu.com.py 已经重构,不再维护 4 | 5 | [**BaiduPCS-Py**](https://github.com/PeterDing/BaiduPCS-Py) 是 pan.baidu.com.py 的重构版,运行在 Python >= 3.6 6 | 7 | [![Join the chat at https://gitter.im/PeterDing/iScript](https://badges.gitter.im/PeterDing/iScript.svg)](https://gitter.im/PeterDing/iScript?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 8 | 9 | > *[L]* *[W]* *[LW]* 分别表示,在linux, windows, linux和windows 下通过测试。 10 | 11 | 12 | > ***windows用户可在babun (https://github.com/babun/babun) 下运行。*** 13 | 14 | 15 | *[L]* - [leetcode_problems.py](#leetcode_problems.py) - 下载Leetcode的算法题 16 | *[L]* - [xiami.py](#xiami.py) - 下载或播放高品质虾米音乐(xiami.com) 17 | *[L]* - [pan.baidu.com.py](#pan.baidu.com.py) - 百度网盘的下载、离线下载、上传、播放、转存、文件操作 18 | *[L]* - [bt.py](#bt.py) - magnet torrent 互转、及 过滤敏.感.词 19 | *[L]* - [115.py](#115.py) - 115网盘的下载和播放 20 | *[L]* - [yunpan.360.cn.py](#yunpan.360.cn.py) - 360网盘的下载 21 | *[L]* - [music.baidu.com.py](#music.baidu.com.py) - 下载或播放高品质百度音乐(music.baidu.com) 22 | *[L]* - [music.163.com.py](#music.163.com.py) - 下载或播放高品质网易音乐(music.163.com) 23 | *[L]* - [flv_cmd.py](#flv_cmd.py) - 基于在线服务的视频解析 client - 支持下载、播放 24 | *[L]* - [tumblr.py](#tumblr.py) - 下载某个tumblr.com的所有图片、视频、音频 25 | *[L]* - [unzip.py](#unzip.py) - 解决linux下unzip乱码的问题 26 | *[L]* - [ed2k_search.py](#ed2k_search.py) - 基于 donkey4u.com 的emule搜索 27 | *[L]* - [91porn.py](#91porn.py) - 下载或播放91porn 28 | *[L]* - [ThunderLixianExporter.user.js](#ThunderLixianExporter.user.js) - A fork of https://github.com/binux/ThunderLixianExporter - 增加了mpv和mplayer的导出。 29 | 30 | --- 31 | 32 | 33 | ### leetcode_problems.py - 下载Leetcode的算法题 34 | 35 | #### 依赖 36 | 37 | ``` 38 | python2-requests (https://github.com/kennethreitz/requests) 39 | 40 | python2-lxml 41 | 42 | ``` 43 | 44 | #### 参数: 45 | 46 | ``` 47 | --index 
sort by index 48 | --level sort by level 49 | --tag sort by tag 50 | --title sort by title 51 | --rm_blank 移除题中的空行 52 | --line LINE 两题之间的空行 53 | -r, --redownload 重新下载数据 54 | ``` 55 | 56 | 下载的数据保持在 ./leecode_problems.pk 57 | 转成的txt在 './leecode problems.txt' 58 | 59 | --- 60 | 61 | 62 | ### xiami.py - 下载或播放高品质虾米音乐(xiami.com) 63 | 64 | #### 1. 依赖 65 | 66 | ``` 67 | wget 68 | 69 | python2-requests (https://github.com/kennethreitz/requests) 70 | 71 | python2-mutagen (https://code.google.com/p/mutagen/) 72 | 73 | mpv (http://mpv.io) 74 | ``` 75 | 76 | #### 2. 使用说明 77 | 78 | xiami.py 是一个虾米音乐的命令行(CLI)客户端。提供登录、下载、播放、收藏的功能。 79 | 80 | **提供对[落网 luoo.net](http://www.luoo.net)的分析** 81 | 82 | 初次使用需要登录 xm login (原xiami账号) 83 | 84 | ~~**支持淘宝账户** xm logintaobao~~ 85 | 86 | ~~**对于淘宝账户,登录后只保存有关虾米的cookies,删除了有关淘宝的cookies**~~ 87 | 88 | **淘宝登录加密算法无法破解,需要手动获取cookies (方法见下 手动添加cookie登录)** 89 | 90 | **vip账户**支持高品质音乐的下载和播放。 91 | 92 | **原虾米vip用户如果不能获得高品质音乐,请用关联的淘宝帐号登录。** 93 | 94 | 下载的MP3默认添加id3 tags,保存在当前目录下。 95 | 96 | cookies保存在 ~/.Xiami.cookies。 97 | 98 | 关于播放操作: 99 | 100 | > 在运行脚本的终端,输入1次Enter,关闭当前播放并播放下一个文件,连续输入2次Enter,关闭当前播放并退出。 101 | 102 | #### 命令: 103 | 104 | ``` 105 | # 虾米账号登录 106 | g 107 | login 108 | login username 109 | login username password 110 | 111 | signout # 退出登录 112 | 113 | d 或 download url1 url2 # 下载 114 | p 或 play url1 url2 # 播放 115 | s 或 save url1 url2 # 收藏 116 | ``` 117 | 118 | #### 参数: 119 | 120 | ``` 121 | -p, --play 按顺序播放 122 | -pp 按歌曲被播放的次数,从高到低播放 123 | -l, --low 低品质mp3 124 | -d, --undescription 不加入disk的描述 125 | -f num, --from_ num 从第num个开始 126 | -t TAGS, --tags TAGS 收藏用的tags,用英文逗号分开, eg: -t piano,cello,guitar 127 | -n, --undownload 不下载,用于修改已存在的MP3的id3 tags 128 | ``` 129 | 130 | #### 3. 用法 131 | 132 | xm 是xiami.py的马甲 (alias xm='python2 /path/to/xiami.py') 133 | 134 | ``` 135 | # 登录 136 | xm g 137 | xm login 138 | xm login username 139 | xm login username password 140 | 141 | # 手动添加cookie登录 142 | 1. 用浏览器登录后,按F12,然后访问 https://www.xiami.com/album/123456 143 | 2. 
选择‘网络’或network,找到 123456,在其中找到 Cookie: xxx 144 | 3. 然后在终端运行 xm g "xxx" 145 | 146 | # 退出登录 147 | xm signout 148 | 149 | # 下载专辑 150 | xm d http://www.xiami.com/album/168709?spm=a1z1s.6928801.1561534521.114.ShN6mD 151 | 152 | # 下载单曲 153 | xm d http://www.xiami.com/song/2082998?spm=a1z1s.6659513.0.0.DT2j7T 154 | 155 | # 下载精选集 156 | xm d http://www.xiami.com/song/showcollect/id/30374035?spm=a1z1s.3061701.6856305.16.fvh75t 157 | 158 | # 下载该艺术家所有专辑, Top 20 歌曲, radio 159 | xm d http://www.xiami.com/artist/23460?spm=a1z1s.6928801.1561534521.115.ShW08b 160 | 161 | # 下载用户的收藏, 虾米推荐, radio, 推荐 162 | xm d http://www.xiami.com/u/141825?spm=a1z1s.3521917.0.0.zI0APP 163 | 164 | # 下载排行榜 165 | xm d http://www.xiami.com/chart/index/c/2?spm=a1z1s.2943549.6827465.6.VrEAoY 166 | 167 | # 下载 风格 genre, radio 168 | xm d http://www.xiami.com/genre/detail/gid/2?spm=a1z1s.3057857.6850221.1.g9ySan 169 | xm d http://www.xiami.com/genre/detail/sid/2970?spm=a1z1s.3057857.6850221.4.pkepgt 170 | 171 | # 下载 widget (虾米播播) 172 | xm d http://www.xiami.com/widget/player-multi?uid=4350663&sid=1774531852,378713,3294421,1771778464,378728,378717,378727,1773346501,&width=990&height=346&mainColor=e29833&backColor=60362a&widget_from=4350663 173 | 174 | # 下载落网期刊 175 | # 分析落网期刊的音乐后,在虾米上搜索并下载 176 | xm d http://www.luoo.net/music/706 177 | ``` 178 | 179 | #### 播放: 180 | 181 | ``` 182 | # url 是上面的 183 | xm p url 184 | ``` 185 | 186 | #### 收藏: 187 | 188 | ``` 189 | xm s http://www.xiami.com/album/168709?spm=a1z1s.6928801.1561534521.114.ShN6mD 190 | xm s -t 'tag1,tag 2,tag 3' http://www.xiami.com/song/2082998?spm=a1z1s.6659513.0.0.DT2j7T 191 | xm s http://www.xiami.com/song/showcollect/id/30374035?spm=a1z1s.3061701.6856305.16.fvh75t 192 | xm s http://www.xiami.com/artist/23460?spm=a1z1s.6928801.1561534521.115.ShW08b 193 | ``` 194 | 195 | #### 4. 
参考: 196 | 197 | > http://kanoha.org/2011/08/30/xiami-absolute-address/ 198 | 199 | 200 | > http://www.blackglory.me/xiami-vip-audition-with-no-quality-difference-between-downloading/ 201 | 202 | 203 | > https://gist.github.com/lepture/1014329 204 | 205 | 206 | > 淘宝登录代码: https://github.com/ly0/xiami-tools 207 | 208 | --- 209 | 210 | 211 | ### pan.baidu.com.py - 百度网盘的下载、离线下载、上传、播放、转存、文件操作 212 | 213 | **pan.baidu.com.py 已经重构,不再维护** 214 | 215 | [**BaiduPCS-Py**](https://github.com/PeterDing/BaiduPCS-Py) 是 pan.baidu.com.py 的重构版,运行在 Python >= 3.6 216 | 217 | #### 1. 依赖 218 | 219 | ``` 220 | wget 221 | 222 | aria2 (~ 1.18) 223 | 224 | aget-rs (https://github.com/PeterDing/aget-rs/releases) 225 | 226 | pip2 install rsa pyasn1 requests requests-toolbelt 227 | 228 | mpv (http://mpv.io) 229 | 230 | # 可选依赖 231 | shadowsocks # 用于加密上传。 232 | # 用 python2 的 pip 安装 233 | pip2 install shadowsocks 234 | 235 | # 除了用pip安装包,还可以手动: 236 | https://github.com/PeterDing/iScript/wiki/%E6%89%8B%E5%8A%A8%E8%A7%A3%E5%86%B3pan.baidu.com.py%E4%BE%9D%E8%B5%96%E5%8C%85 237 | ``` 238 | 239 | #### other 240 | 241 | [尝试解决百度网盘下载速度问题](https://github.com/PeterDing/iScript/wiki/解决百度网盘下载速度问题) 242 | 243 | #### 2. 
使用说明 244 | 245 | pan.baidu.com.py 是一个百度网盘的命令行客户端。 246 | 247 | 初次使用需要登录 bp login 248 | 249 | **支持多帐号登录** 250 | 251 | **现在只支持[用cookie登录](#cookie_login)** 252 | 253 | **支持cookie登录** 254 | 255 | **支持加密上传**, 需要 shadowsocks 256 | 257 | **cd, ls 功能完全支持** 258 | 259 | **所有路径可以是 相对路径 或 绝对路径** 260 | 261 | 他人分享的网盘连接,只支持单个的下载。 262 | 263 | 下载工具默认为wget, 可用参数-a num选用aria2 264 | 265 | **支持用 aget 加速下载, 用法见下** 266 | 267 | 下载的文件,保存在当前目录下。 268 | 269 | 下载默认为非递归,递归下载加 -R 270 | 271 | 搜索时,默认在 cwd 272 | 273 | 搜索支持高亮 274 | 275 | 上传模式默认是 c (续传)。 276 | 277 | **开启证实(verification) 用参数 -V** 278 | 279 | 理论上,上传的单个文件最大支持 2T 280 | 281 | cookies保存在 ~/.bp.cookies 282 | 283 | 上传数据保存在 ~/.bp.pickle 284 | 285 | 关于播放操作: 286 | 287 | > 在运行脚本的终端,输入1次Enter,关闭当前播放并播放下一个文件,连续输入2次Enter,关闭当前播放并退出。 288 | 289 | 290 | #### 命令: 291 | 292 | **!!注意:** 293 | **命令参数中,所有网盘的路径和本地路径可以是 相对路径 或 绝对路径** 294 | 295 | ``` 296 | # 登录 297 | g 298 | login 299 | login username 300 | login username password 301 | login username cookie 302 | 303 | # 删除帐号 304 | userdelete 或 ud 305 | 306 | # 切换帐号 307 | userchange 或 uc 308 | 309 | # 帐号信息 310 | user 311 | 312 | # 显示当前工作目录 313 | cwd 314 | 315 | # 切换当前工作目录 316 | cd path # 支持 ./../... 317 | 318 | # 播放 319 | p 或 play url1 url2 path1 path2 320 | 321 | # 上传 322 | u 或 upload localpath remotepath 323 | 324 | # 加密上传 325 | u localpath remotepath [-P password] -t ec -R 326 | 327 | # 转存 328 | s 或 save url remotepath [-s secret] 329 | 330 | # 下载 331 | d 或 download url1 url2 path1 path2 非递归下载 到当前本地目录 332 | d 或 download url1 url2 path1 path2 -R 递归下载 到当前本地目录 333 | # !! 
注意: 334 | # d /path/to/download -R 递归下载 *download文件夹* 到当前本地目录 335 | # d /path/to/download/ -R 递归下载 *download文件夹中的文件* 到当前本地目录 336 | 337 | # 下载并解密 338 | d /path/to/download -R -t dc [-P password] [-m aes-256-cfb] 339 | 340 | # 解密已下载的文件 341 | dc path1 path2 -R [-P password] [-m aes-256-cfb] 342 | 343 | # 文件操作 344 | md 或 mkdir path1 path2 创建文件夹 345 | rn 或 rename path new_path 重命名 346 | rm 或 remove path1 path2 删除 347 | mv 或 move path1 path2 /path/to/directory 移动 348 | cp 或 copy path /path/to/directory_or_file 复制 349 | cp 或 copy path1 path2 /path/to/directory 复制 350 | 351 | # 使用正则表达式进行文件操作 352 | rnr 或 rnre foo bar dir1 dir2 -I re1 re2 重命名文件夹中的文件名 353 | rmr 或 rmre dir1 dir2 -E re1 re2 删除文件夹下匹配到的文件 354 | mvr 或 mvre dir1 dir2 /path/to/dir -H head1 head2 移动文件夹下匹配到的文件 355 | cpr 或 cpre dir1 dir2 /path/to/dir -T tail1 tail2 复制文件夹下匹配到的文件 356 | # 递归加 -R 357 | # rmr, mvr, cpr 中 -t, -I, -E, -H, -T 至少要有一个,放在命令行末尾 358 | # -I, -E, -H, -T 后可跟多个匹配式 359 | # 可以用 -t 指定操作的文件类型 360 | -t f # 文件 361 | -t d # 文件夹 362 | # rnr 中 foo bar 都是 regex 363 | # -y, --yes # 不显示警示,直接进行。 !!注意,除非你知道你做什么,否则请不要使用。 364 | rmr / -I '.*' -y # !! 
删除网盘中的所有文件 365 | 366 | # 回复用bt.py做base64加密的文件 367 | rnr /path/to/decode1 /path/to/decode2 -t f,bd64 368 | 369 | # 搜索 370 | # directory 必须是绝对路径, 默认是 cwd 371 | f 或 find keyword1 keyword2 [directory] 非递归搜索 372 | ff keyword1 keyword2 [directory] 非递归搜索 反序 373 | ft keyword1 keyword2 [directory] 非递归搜索 by time 374 | ftt keyword1 keyword2 [directory] 非递归搜索 by time 反序 375 | fs keyword1 keyword2 [directory] 非递归搜索 by size 376 | fss keyword1 keyword2 [directory] 非递归搜索 by size 反序 377 | fn keyword1 keyword2 [directory] 非递归搜索 by name 378 | fnn keyword1 keyword2 [directory] 非递归搜索 by name 反序 379 | # 递归搜索加 -R 380 | f 'ice and fire' /doc -R 381 | # 搜索所有的账户加 -t all 382 | f keyword1 keyword2 [directory] -t all -R 383 | f keyword1 keyword2 [directory] -t f,all -R 384 | # directory 默认为 / 385 | # 关于-H, -T, -I, -E 386 | # -I, -E, -H, -T 后可跟多个匹配式, 需要放在命令行末尾 387 | f keyword1 keyword2 [directory] -H head -T tail -I "re(gul.*) ex(p|g)ress$" 388 | f keyword1 keyword2 [directory] -H head -T tail -E "re(gul.*) ex(p|g)ress$" 389 | # 搜索 加 通道(只支持 donwload, play, rnre, rm, mv) 390 | f keyword1 keyword2 [directory] \| d -R 递归搜索后递归下载 391 | ftt keyword1 keyword2 [directory] \| p -R 递归搜索(by time 反序)后递归播放 392 | f keyword1 keyword2 [directory] \| rnr foo bar -R 递归搜索后rename by regex 393 | f keyword1 keyword2 [directory] \| rm -R -T tail 递归搜索后删除 394 | f keyword1 keyword2 [directory] \| mv /path/to -R 递归搜索后移动 395 | 396 | # 列出文件 397 | l path1 path2 ls by name 398 | ll path1 path2 ls by name 反序 399 | ln path1 path2 ls by name 400 | lnn path1 path2 ls by name 反序 401 | lt path1 path2 ls by time 402 | ltt path1 path2 ls by time 反序 403 | ls path1 path2 ls by size 404 | lss path1 path2 ls by size 反序 405 | l /doc/books /videos 406 | # 以下是只列出文件或文件夹 407 | l path1 path2 -t f ls files 408 | l path1 path2 -t d ls directorys 409 | # 关于-H, -T, -I, -E 410 | # -I, -E, -H, -T 后可跟多个匹配式, 需要放在命令行末尾 411 | l path1 path2 -H head -T tail -I "^re(gul.*) ex(p|g)ress$" 412 | l path1 path2 -H head -T tail -E "^re(gul.*) ex(p|g)ress$" 413 
| # 显示绝对路径 414 | l path1 path2 -v 415 | # 显示文件size, md5 416 | l path1 path2 -vv 417 | # 空文件夹 418 | l path1 path2 -t e,d 419 | # 非空文件夹 420 | l path1 path2 -t ne,d 421 | 422 | # 分享文件 423 | S 或 share path1 path2 为每个提供的文件路劲创建分享链接 424 | S 或 share [-P pawd 或 --passwd pawd] path1 path2 为每个提供的路径创建加密的分享链接 425 | 426 | # 查看文件占用空间 427 | du path1 path2 文件夹下所有*文件(不包含下层文件夹)*总大小 428 | du path1 path2 -R 文件夹下所有*文件(包含下层文件夹)*总大小 429 | 如果下层文件多,会花一些时间 430 | # 相当于 l path1 path2 -t du [-R] 431 | # eg: 432 | du /doc /videos -R 433 | 434 | # 离线下载 435 | a 或 add http https ftp ed2k remotepath 436 | a 或 add magnet remotepath [-t {m,i,d,p}] 437 | a 或 add remote_torrent [-t {m,i,d,p}] # 使用网盘中torrent 438 | 439 | # 离线任务操作 440 | j 或 job # 列出离线下载任务 441 | jd 或 jobdump # 清除全部 *非正在下载中的任务* 442 | jc 或 jobclear taskid1 taskid2 # 清除 *正在下载中的任务* 443 | jca 或 jobclearall # 清除 *全部任务* 444 | ``` 445 | 446 | #### 参数: 447 | 448 | ``` 449 | -a num, --aria2c num aria2c 分段下载数量: eg: -a 10 450 | -g num, --aget_s num aget 分段下载数量: eg: -g 100 451 | -k num, --aget_k size aget 分段大小: eg: -k 200K 452 | -k 1M 453 | -k 2M 454 | --appid num 设置 app-id. 
如果无法下载或下载慢, 尝试设置为 778750 455 | -o path, --outdir path 指定下载目录: eg: -o /path/to/directory 456 | -p, --play play with mpv 457 | -P password, --passwd password 分享密码,加密密码 458 | -y, --yes yes # 用于 rmre, mvre, cpre, rnre !!慎用 459 | -q, --quiet 无输出模式, 用于 download, play 460 | -V, --VERIFY verification 461 | -v, --view view detail 462 | eg: 463 | l -v # 显示绝对路径 464 | a magnet /path -v # 离线下载并显示下载的文件 465 | d -p url1 url2 -v # 显示播放文件的完整路径 466 | l path1 path2 -vv # 显示文件的size, md5 467 | -s SECRET, --secret SECRET 提取密码 468 | -f number, --from_ number 从第几个开始(用于download, play),eg: p /video -f 42 469 | -t ext, --type_ ext 类型参数, 用 “,” 分隔 470 | eg: 471 | -t fs # 换用下载服务器,用于下载、播放 472 | # 如果wiki中的速度解决方法不管用,可以试试加该参数 473 | d -t dc # 下载并解密,覆盖加密文件(默认) 474 | d -t dc,no # 下载并解密,不覆盖加密文件 475 | dc -t no # 解密,不覆盖加密文件 476 | d -t ie # ignore error, 忽略除Ctrl-C以外的下载错误 477 | d -t 8s # 检测文件是否是“百度8秒”,如果是则不下载 478 | p -t m3 # 播放流媒体(m3u8) 479 | s -t c # 连续转存 (如果转存出错,再次运行命令 480 | # 可以从出错的地方开始,用于转存大量文件时) 481 | l -t f # 文件 482 | l -t d # 文件夹 483 | l -t du # 查看文件占用空间 484 | l -t e,d # 空文件夹 485 | f -t all # 搜索所有账户 486 | a -t m,d,p,a 487 | u -t ec # encrypt, 加密上传, 默认加前缀 488 | u -t ec,np # encrypt, 加密上传, 不加前缀 489 | u -t r # 只进行 rapidupload 490 | u -t e # 如果云端已经存在则不上传(不比对md5) 491 | u -t r,e 492 | -t s # shuffle,乱序 493 | -l amount, --limit amount 下载速度限制,eg: -l 100k 494 | -m {o,c}, --mode {o,c} 模式: o # 重新上传. c # 连续上传. 495 | 加密方法: https://github.com/shadowsocks/shadowsocks/wiki/Encryption 496 | -R, --recursive 递归, 用于download, play, upload, ls, find, rmre, rnre, rmre, cpre 497 | -H HEADS, --head HEADS 匹配开头的字符,eg: -H Head1 Head2 498 | -T TAILS, --tail TAILS 匹配结尾的字符,eg: -T Tail1 Tail2 499 | -I INCLUDES, --include INCLUDES 不排除匹配到表达的文件名, 可以是正则表达式,eg: -I ".*.mp3" ".*.avi" 500 | -E EXCLUDES, --exclude EXCLUDES 排除匹配到表达的文件名, 可以是正则表达式,eg: -E ".*.html" ".*.jpg" 501 | -c {on, off}, --ls_color {on, off} ls 颜色,默认是on 502 | 503 | # -t, -H, -T, -I, -E 都能用于 download, play, ls, find, rnre, rmre, cpre, mvre 504 | ``` 505 | 506 | #### 3. 
用法 507 | 508 | bp 是pan.baidu.com.py的马甲 (alias bp='python2 /path/to/pan.baidu.com.py') 509 | 510 | #### 登录: 511 | 512 | ``` 513 | bp g 514 | bp login 515 | bp login username 516 | bp login username password 517 | 518 | # 多帐号登录 519 | # 一直用 bp login 即可 520 | ``` 521 | 522 | 523 | #### cookie 登录: 524 | 525 | 1. 打开 chrome 隐身模式窗口 526 | 2. 在隐身模式窗口登录 pan.baidu.com 527 | 3. 在登录后的页面打开 chrome 开发者工具(怎么打开自行google),选择 `Network` ,然后刷新页面。在刷新后的 `Network` 的 `Name` 列表中选中 `list?dir=…` 开头的一项,然后在右侧找到 `Cookie:` ,复制 `Cookie:` 后面的所有内容。 528 | 4. 用 `pan.baidu.com.py` 登录,`password / cookie:` 处粘贴上面复制的内容。(粘贴后是看不见的)。 529 | 5. 不要退出 pan.baidu.com,只是关闭隐身模式窗口就可以。 530 | 531 | > 如果使用 cookie 登录,`username` 可以是任意的东西。 532 | 533 | #### 删除帐号: 534 | 535 | ``` 536 | bp ud 537 | ``` 538 | 539 | #### 切换帐号: 540 | 541 | ``` 542 | bp uc 543 | ``` 544 | 545 | #### 帐号信息: 546 | 547 | ``` 548 | bp user 549 | ``` 550 | 551 | #### 显示当前工作目录 552 | 553 | ``` 554 | bp cwd 555 | ``` 556 | 557 | #### 切换当前工作目录 558 | 559 | ``` 560 | bp cd # 切换到 / 561 | bp cd path # 支持 ./../... 562 | bp cd .. 563 | bp cd ../../Music 564 | bp cd ... 565 | ``` 566 | 567 | #### 下载: 568 | 569 | ``` 570 | ## 下载、播放速度慢? 571 | 如果无法下载或下载慢, 尝试设置参数 --appid 778750 572 | bp d /path/file --appid 778750 573 | 574 | # 下载当前工作目录 (递归) 575 | bp d . 
-R 576 | 577 | # 下载自己网盘中的*单个或多个文件* 578 | bp d http://pan.baidu.com/disk/home#dir/path=/path/to/filename1 http://pan.baidu.com/disk/home#dir/path=/path/to/filename2 579 | # or 580 | bp d /path/to/filename1 /path/to/filename2 581 | 582 | # 递归下载自己网盘中的*单个或多个文件夹* 583 | bp d -R http://pan.baidu.com/disk/home#dir/path=/path/to/directory1 http://pan.baidu.com/disk/home#dir/path=/path/to/directory2 584 | # or 585 | bp d -R /path/to/directory1 /path/to/directory2 586 | # 递归下载后缀为 .mp3 的文件 587 | bp d -R /path/to/directory1 /path/to/directory2 -T .mp3 588 | 589 | # 非递归下载 590 | bp d relative_path/to/directory1 /path/to/directory2 591 | 592 | # 下载别人分享的*单个文件* 593 | bp d http://pan.baidu.com/s/1o6psfnxx 594 | bp d 'http://pan.baidu.com/share/link?shareid=1622654699&uk=1026372002&fid=2112674284' 595 | 596 | # 下载别人加密分享的*单个文件*,密码参数-s 597 | bp d http://pan.baidu.com/s/1i3FVlw5 -s vuej 598 | 599 | # 用aria2 下载 600 | bp d http://pan.baidu.com/s/1i3FVlw5 -s vuej -a 5 601 | bp d /movie/her.mkv -a 4 602 | bp d url -s [secret] -a 10 603 | 604 | # 用 aget 下载 605 | bp d http://pan.baidu.com/s/1i3FVlw5 -s vuej -g 100 606 | bp d /movie/her.mkv -g 100 -k 200K 607 | bp d url -s [secret] -g 100 -k 100K 608 | 如果下载速度很慢,可以试试加大 -g, 减小 -k, -k 一般在 100K ~ 300K 之间合适 609 | 610 | # 下载并解码 611 | ## 默认加密方法为 aes-256-cfb 612 | bp d /path/to/encrypted_file -t dc [-P password] # 覆盖加密文件 (默认) 613 | bp d /path/to/encrypted_file -t dc,no [-P password] # 不覆盖加密文件 614 | ## 设置加密方法 615 | bp d /path/to/encrypted_file -t dc [-P password] -m 'rc4-md5' 616 | bp d /path/to/directory -t dc [-P password] -m 'rc4-md5' 617 | ``` 618 | 619 | #### 解码已下载的加密文件: 620 | 621 | ``` 622 | bp dc /local/to/encrypted_file [-P password] -m 'aes-256-cfb' 623 | bp dc /local/to/encrypted_file [-P password] 624 | bp dc /local/to/directory [-P password] 625 | ``` 626 | 627 | #### 播放: 628 | 629 | ``` 630 | bp p /movie/her.mkv 631 | bp p http://pan.baidu.com/s/xxxxxxxxx -s [secret] 632 | 633 | bp cd /movie 634 | bp p movie -R # 递归播放 /movie 中所有媒体文件 635 | 
636 | # 播放流媒体(m3u8) 637 | 上面的命令后加 -t m3 638 | 清晰度与在浏览器上播放的一样. 639 | 如果源文件是高清的(720P,1280P),那么流媒体会自动转为480P. 640 | ``` 641 | 642 | #### 离线下载: 643 | 644 | ``` 645 | bp a http://mirrors.kernel.org/archlinux/iso/latest/archlinux-2014.06.01-dual.iso /path/to/save 646 | bp a https://github.com/PeterDing/iScript/archive/master.zip /path/to/save 647 | bp a ftp://ftp.netscape.com/testfile /path/to/save 648 | 649 | bp a 'magnet:?xt=urn:btih:64b7700828fd44b37c0c045091939a2c0258ddc2' /path/to/save -v -t a 650 | bp a 'ed2k://|file|[美]徐中約《中国近代史》第六版原版PDF.rar|547821118|D09FC5F70DEA63E585A74FBDFBD7598F|/' /path/to/save 651 | 652 | bp a /path/to/a.torrent -v -t m,i # 使用网盘中torrent,下载到/path/to 653 | # 注意 ------------------ 654 | ↓ 655 | 网盘中的torrent 656 | ``` 657 | 658 | #### magnet离线下载 -- 文件选择: 659 | 660 | ``` 661 | -t m # 视频文件 (默认), 如: mkv, avi ..etc 662 | -t i # 图像文件, 如: jpg, png ..etc 663 | -t d # 文档文件, 如: pdf, doc, docx, epub, mobi ..etc 664 | -t p # 压缩文件, 如: rar, zip ..etc 665 | -t a # 所有文件 666 | m, i, d, p, a 可以任意组合(用,分隔), 如: -t m,i,d -t d,p -t i,p 667 | remotepath 默认为 / 668 | 669 | bp a 'magnet:?xt=urn:btih:64b7700828fd44b37c0c045091939a2c0258ddc2' /path/to/save -v -t p,d 670 | bp a /download/a.torrent -v -t m,i,d # 使用网盘中torrent,下载到/download 671 | ``` 672 | 673 | #### 离线任务操作: 674 | 675 | ``` 676 | bp j 677 | bp j 3482938 8302833 678 | bp jd 679 | bp jc taskid1 taskid2 680 | bp jc 1208382 58239221 681 | bp jca 682 | ``` 683 | 684 | #### 上传: (默认为非递归,递归加 -R) 685 | 686 | ``` 687 | # 支持文件类型选择 688 | bp u ~/Documents/* # 默认上传所以文件 689 | bp u ~/Documents/* -t f # 不上传文件夹 690 | bp u ~/Documents/* -t d # 不上传文件 691 | bp u ~/Documents/* -t f,d # 不上传文件和文件夹 692 | 693 | bp u ~/Documents/reading/三体\ by\ 刘慈欣.mobi /doc -m o 694 | # 上传模式: 695 | # -m o --> 重传 696 | # -m c --> 续传 (默认) 697 | # 递归加-R 698 | 699 | bp u ~/Videos/*.mkv /videos -t r 700 | # 只进行rapidupload 701 | 702 | bp u ~/Documents ~/Videos ~/Documents /backup -t e -R 703 | # 如果云端已经存在则不上传(不比对md5) 704 | # 用 -t e 时, -m o 无效 705 | 706 | bp u 
~/Documents ~/Videos ~/Documents /backup -t r,e # 以上两种模式 707 | ``` 708 | 709 | #### 加密上传: (默认为非递归,递归加 -R) 710 | 711 | ``` 712 | bp u ~/{p1,p2,p3} -t ec [-P password] # 默认加密方法 'aes-256-cfb' 713 | bp u ~/{p1,p2,p3} -t ec [-P password] -m 'rc4-md5' 714 | 715 | # 注意: 716 | # 上传后的文件名会默认加上前缀 encrypted_ 717 | # 不加前缀用 -t ec,np 718 | ``` 719 | 720 | #### 转存: 721 | 722 | ``` 723 | bp s url remotepath [-s secret] 724 | # url是他人分享的连接, 如: http://pan.baidu.com/share/link?shareid=xxxxxxx&uk=xxxxxxx, http://pan.baidu.com/s/xxxxxxxx 725 | bp s 'http://pan.baidu.com/share/link?shareid=xxxxxxx&uk=xxxxxxx' /path/to/save 726 | bp s http://pan.baidu.com/s/xxxxxxxx /path/to/save 727 | bp s http://pan.baidu.com/s/xxxxxxxx /path/to/save -s xxxx 728 | bp s http://pan.baidu.com/s/xxxxxxxx#dir/path=/path/to/anything /path/to/save -s xxxx 729 | 730 | bp s http://pan.baidu.com/inbox/i/xxxxxxxx /path/to/save 731 | 732 | # -t c 连续转存 (如果转存出错,再次运行命令可以从出错的地方开始,用于转存大量文件时) 733 | bp s 'http://pan.baidu.com/share/link?shareid=2705944270&uk=708312363' /path/to/save -t c 734 | # 注意:再次运行时,命令要一样。 735 | ``` 736 | 737 | #### 搜索: 738 | 739 | ``` 740 | # 默认搜索当前服务器工作目录 cwd 741 | bp f keyword1 keyword2 742 | bp f "this is one keyword" "this is another keyword" /path/to/search 743 | 744 | bp f ooxx -R 745 | bp f 三体 /doc/fiction -R 746 | bp f 晓波 /doc -R 747 | 748 | bp ff keyword1 keyword2 /path/to/music 非递归搜索 反序 749 | bp ft keyword1 keyword2 /path/to/doc 非递归搜索 by time 750 | bp ftt keyword1 keyword2 /path/to/other 非递归搜索 by time 反序 751 | bp fs keyword1 keyword2 非递归搜索 by size 752 | bp fss keyword1 keyword2 非递归搜索 by size 反序 753 | bp fn keyword1 keyword2 非递归搜索 by name 754 | bp fnn keyword1 keyword2 非递归搜索 by name 反序 755 | 756 | # 递归搜索加 -R 757 | # 关于-H, -T, -I, -E 758 | bp f mp3 /path/to/search -H "[" "01" -T ".tmp" -I ".*-.*" -R 759 | 760 | # 搜索所有的账户 761 | bp f iDoNotKnow [directory] -t all -R 762 | bp f archlinux ubuntu [directory] -t f,all -T .iso -R 763 | 764 | # 搜索 加 通道(只支持 donwload, play, rnre, rm, mv) 765 | bp f 
bioloy \| d -R 递归搜索后递归下载 766 | bp ftt ooxx \| p -R -t f 递归搜索(by time 反序)后递归播放 767 | bp f sound \| rnr mp3 mp4 -R 递归搜索后rename by regex 768 | bp f ccav \| rm -R -T avi 递归搜索后删除 769 | bp f 新闻联播(大结局) \| mv /Favor -R 递归搜索后移动 770 | ``` 771 | 772 | #### 恢复用bt.py做base64加密的文件: 773 | 774 | ``` 775 | rnr /ooxx -t f,bd64 776 | !! 注意: /ooxx 中的所有文件都必须是被base64加密的,且加密段要有.base64后缀 777 | # 可以参考 bt.py 的用法 778 | ``` 779 | 780 | ls、重命名、移动、删除、复制、使用正则表达式进行文件操作: 781 | 782 | 见[命令](#cmd) 783 | 784 | #### 4. 参考: 785 | 786 | > https://gist.github.com/HououinRedflag/6191023 787 | 788 | 789 | > https://github.com/banbanchs/pan-baidu-download/blob/master/bddown_core.py 790 | 791 | 792 | > https://github.com/houtianze/bypy 793 | 794 | 795 | > 3个方法解决百度网盘限速: https://www.runningcheese.com/baiduyun 796 | 797 | 798 | --- 799 | 800 | 801 | ### bt.py - magnet torrent 互转、及 过滤敏.感.词 802 | 803 | #### 1. 依赖 804 | 805 | ``` 806 | python2-requests (https://github.com/kennethreitz/requests) 807 | bencode (https://github.com/bittorrent/bencode) 808 | ``` 809 | 810 | #### 2. 使用说明 811 | 812 | magnet 和 torrent 的相互转换 813 | 814 | 过滤敏.感.词功能用于净网时期的 baidu, xunlei 815 | 816 | 在中国大陆使用代理可能有更好的效果: 817 | 使用代理有两种方法: 818 | 1. shadowsocks + proxychains 819 | 2. -p protocol://ip:port 820 | 821 | ~~8.30日后,无法使用。 见 http://tieba.baidu.com/p/3265467666~~ 822 | 823 | [**百度云疑似解封,百度网盘内八秒视频部分恢复**](http://fuli.ba/baiduyunhuifuguankan.html) 824 | 825 | **!! 
注意:过滤后生成的torrent在百度网盘只能用一次,如果需要再次使用,则需用 -n 改顶层目录名** 826 | 827 | 磁力连接转种子,用的是 828 | 829 | ``` 830 | http://bt.box.n0808.com 831 | http://btcache.me 832 | http://www.sobt.org # 302 --> http://www.win8down.com/url.php?hash= 833 | http://www.31bt.com 834 | http://178.73.198.210 835 | http://www.btspread.com # link to http://btcache.me 836 | http://torcache.net 837 | http://zoink.it 838 | http://torrage.com # 用torrage.com需要设置代理, eg: -p 127.0.0.1:8087 839 | http://torrentproject.se 840 | http://istoretor.com 841 | http://torrentbox.sx 842 | http://www.torrenthound.com 843 | http://www.silvertorrent.org 844 | http://magnet.vuze.com 845 | ``` 846 | 847 | 如果有更好的种子库,请提交issue 848 | 849 | > 对于baidu, 加入离线任务后,需等待一段时间才会下载完成。 850 | 851 | #### 命令: 852 | 853 | ``` 854 | # magnet 2 torrent 855 | m 或 mt magnet_link1 magnet_link2 [-d /path/to/save] 856 | m -i /there/are/files -d new 857 | 858 | # torrent 2 magnet, 输出magnet 859 | t 或 tm path1 path2 860 | 861 | # 过滤敏.感.词 862 | # 有2种模式 863 | # -t n (默认) 用数字替换文件名 864 | # -t be64 用base64加密文件名,torrent用百度下载后,可用 pan.baidu.com.py rnr /path -t f,bd64 改回原名字 865 | c 或 ct magnet_link1 magnet_link2 /path/to/torrent1 /path/to/torrent2 [-d /path/to/save] 866 | c -i /there/are/files and_other_dir -d new # 从文件或文件夹中寻找 magnet,再过滤 867 | # 过滤敏.感.词 - 将magnet或torrent转成不敏感的 torrent 868 | # /path/to/save 默认为 . 
869 | 870 | # 用base64加密的文件名: 871 | c magnet_link1 magnet_link2 /path/to/torrent1 /path/to/torrent2 [-d /path/to/save] -t be64 872 | 873 | # 使用正则表达式过滤敏.感.词 874 | cr 或 ctre foo bar magnet_link1 /path/to/torrent1 [-d /path/to/save] 875 | # foo bar 都是 regex 876 | ``` 877 | 878 | #### 参数: 879 | 880 | ``` 881 | -p PROXY, --proxy PROXY proxy for torrage.com, eg: -p "socks5://127.0.0.1:8883" 882 | -t TYPE_, --type_ TYPE_ 类型参数: 883 | -t n (默认) 用数字替换文件名 884 | -t be64 用base64加密文件名,torrent用百度下载后,可用 pan.baidu.com.py rnr /path -t f,bd64 改回原名字 885 | -d DIRECTORY, --directory DIRECTORY 指定torrents的保存路径, eg: -d /path/to/save 886 | -n NAME, --name NAME 顶级文件夹名称, eg: -n thistopdirectory 887 | -i localpath1 localpath2, --import_from localpath1 localpath2 从本地文本文件导入magnet (用正则表达式匹配) 888 | ``` 889 | 890 | #### 3. 用法 891 | 892 | bt 是bt.py的马甲 (alias bt='python2 /path/to/bt.py') 893 | 894 | ``` 895 | bt mt magnet_link1 magnet_link2 [-d /path/to/save] 896 | bt tm path1 path2 897 | bt ct magnet_link1 path1 [-d /path/to/save] 898 | 899 | bt m magnet_link1 magnet_link2 [-d /path/to/save] 900 | bt t path1 path2 901 | bt c magnet_link1 path1 [-d /path/to/save] 902 | 903 | # 用torrage.com 904 | bt m magnet_link1 path1 -p 127.0.0.1:8087 905 | bt c magnet_link1 path1 -p 127.0.0.1:8087 906 | 907 | # 从文件或文件夹中寻找 magnet,再过滤 908 | bt c -i ~/Downloads -d new 909 | 910 | # 使用正则表达式过滤敏.感.词 911 | bt cr '.*(old).*' '\1' magnet_link 912 | bt cr 'old.iso' 'new.iso' /path/to/torrent 913 | 914 | # 用base64加密的文件名: 915 | bt c magnet_link -t be64 916 | ``` 917 | 918 | #### 4. 参考: 919 | 920 | > http://blog.chinaunix.net/uid-28450123-id-4051635.html 921 | 922 | 923 | > http://en.wikipedia.org/wiki/Torrent_file 924 | 925 | 926 | --- 927 | 928 | 929 | ### 115.py - 115网盘的下载和播放 930 | 931 | #### 1. 
依赖 932 | 933 | ``` 934 | wget 935 | 936 | aria2 (~ 1.18) 937 | 938 | python2-requests (https://github.com/kennethreitz/requests) 939 | 940 | mpv (http://mpv.io) 941 | 942 | mplayer # 我的linux上mpv播放wmv出错,换用mplayer 943 | ``` 944 | 945 | #### 2. 使用说明 946 | 947 | 初次使用需要登录 pan115 login 948 | 949 | **脚本是用于下载自己的115网盘文件,不支持他人分享文件。** 950 | 951 | 下载工具默认为wget, 可用参数-a选用aria2。 952 | 953 | **现在vip和非vip用户下载只能有1个通道,用aria2下载已经无意义。** 954 | 955 | 对所有文件,默认执行下载(用wget),如要播放媒体文件,加参数-p。 956 | 957 | **非vip用户下载太慢,已经不支持播放。 vip播放正常** 958 | 959 | 下载的文件,保存在当前目录下。 960 | 961 | cookies保存在 ~/.115.cookies 962 | 963 | 关于播放操作: 964 | 965 | > 在运行脚本的终端,输入1次Enter,关闭当前播放并播放下一个文件,连续输入2次Enter,关闭当前播放并退出。 966 | 967 | #### 参数: 968 | 969 | ``` 970 | -a, --aria2c download with aria2c 971 | -p, --play play with mpv 972 | -f number, --from_ number 从第几个开始下载,eg: -f 42 973 | -t ext, --type_ ext 要下载的文件的后缀,eg: -t mp3 974 | -l amount, --limit amount 下载速度限制,eg: -l 100k 975 | -d "url" 增加离线下载 "http/ftp/magnet/ed2k" 976 | ``` 977 | 978 | #### 3. 用法 979 | 980 | pan115 是115.py的马甲 (alias pan115='python2 /path/to/115.py') 981 | 982 | ``` 983 | # 登录 984 | pan115 g 985 | pan115 login 986 | pan115 login username 987 | pan115 login username password 988 | 989 | # 退出登录 990 | pan115 signout 991 | 992 | # 递归下载自己网盘中的*文件夹* 993 | pan115 http://115.com/?cid=xxxxxxxxxxxx&offset=0&mode=wangpan 994 | 995 | # 下载自己网盘中的*单个文件* -- 只能是115上可单独打开的文件,如pdf,视频 996 | pan115 http://wenku.115.com/preview/?pickcode=xxxxxxxxxxxx 997 | 998 | # 下载用aria2, url 是上面的 999 | pan115 -a url 1000 | 1001 | # 增加离线下载 1002 | pan115 -d "magnet:?xt=urn:btih:757fc565c56462b28b4f9c86b21ac753500eb2a7&dn=archlinux-2014.04.01-dual.iso" 1003 | ``` 1004 | 1005 | #### 播放 1006 | 1007 | ``` 1008 | # url 是上面的 1009 | pan115 -p url 1010 | ``` 1011 | 1012 | #### 4. 参考: 1013 | 1014 | > http://passport.115.com/static/wap/js/common.js?v=1.6.39 1015 | 1016 | --- 1017 | 1018 | 1019 | ### yunpan.360.cn.py - 360网盘的下载 1020 | 1021 | **!!!脚本已不再维护!!!** 1022 | 1023 | #### 1. 
依赖 1024 | 1025 | ``` 1026 | wget 1027 | 1028 | aria2 (~ 1.18) 1029 | 1030 | python2-requests (https://github.com/kennethreitz/requests) 1031 | ``` 1032 | 1033 | #### 2. 使用说明 1034 | 1035 | 初次使用需要登录 yp login 1036 | 1037 | **!!!!!! 万恶的360不支持断点续传 !!!!!!** 1038 | 1039 | 由于上面的原因,不能播放媒体文件。 1040 | 1041 | 只支持自己的\*文件夹\*的递归下载。 1042 | 1043 | 下载工具默认为wget, 可用参数-a选用aria2 1044 | 1045 | 下载的文件,保存在当前目录下。 1046 | 1047 | cookies保存在 ~/.360.cookies 1048 | 1049 | #### 参数: 1050 | 1051 | ``` 1052 | -a, --aria2c download with aria2c 1053 | -f number, --from_ number 从第几个开始下载,eg: -f 42 1054 | -t ext, --type_ ext 要下载的文件的后缀,eg: -t mp3 1055 | -l amount, --limit amount 下载速度限制,eg: -l 100k 1056 | ``` 1057 | 1058 | #### 3. 用法 1059 | 1060 | yp 是yunpan.360.cn.py的马甲 (alias yp='python2 /path/to/yunpan.360.cn.py') 1061 | 1062 | ``` 1063 | # 登录 1064 | yp g 1065 | yp login 1066 | yp login username 1067 | yp login username password 1068 | 1069 | # 退出登录 1070 | yp signout 1071 | 1072 | # 递归下载自己网盘中的*文件夹* 1073 | yp http://c17.yunpan.360.cn/my/?sid=#/path/to/directory 1074 | yp http://c17.yunpan.360.cn/my/?sid=#%2Fpath%3D%2Fpath%2Fto%2Fdirectory 1075 | # or 1076 | yp sid=/path/to/directory 1077 | yp sid%3D%2Fpath%2Fto%2Fdirectory 1078 | 1079 | # 下载用aria2, url 是上面的 1080 | yp -a url 1081 | ``` 1082 | 1083 | #### 4. 参考: 1084 | 1085 | > https://github.com/Shu-Ji/gorthon/blob/master/_3rdapp/CloudDisk360/main.py 1086 | 1087 | --- 1088 | 1089 | 1090 | ### music.baidu.com.py - 下载或播放高品质百度音乐(music.baidu.com) 1091 | 1092 | #### 1. 依赖 1093 | 1094 | ``` 1095 | wget 1096 | 1097 | python2-mutagen (https://code.google.com/p/mutagen/) 1098 | 1099 | mpv (http://mpv.io) 1100 | ``` 1101 | 1102 | #### 2. 
使用说明 1103 | 1104 | 默认执行下载,如要播放,加参数-p。 1105 | 1106 | #### 参数: 1107 | 1108 | ``` 1109 | -f, --flac download flac 1110 | -i, --high download 320, default 1111 | -l, --low download 128 1112 | -p, --play play with mpv 1113 | ``` 1114 | 1115 | 下载的MP3默认添加id3 tags,保存在当前目录下。 1116 | 1117 | 关于播放操作: 1118 | 1119 | > 在运行脚本的终端,输入1次Enter,关闭当前播放并播放下一个文件,连续输入2次Enter,关闭当前播放并退出。 1120 | 1121 | #### 3. 用法 1122 | 1123 | bm 是music.baidu.com.py的马甲 (alias bm='python2 /path/to/music.baidu.com.py') 1124 | 1125 | ``` 1126 | # 下载专辑 1127 | bm http://music.baidu.com/album/115032005 1128 | 1129 | # 下载单曲 1130 | bm http://music.baidu.com/song/117948039 1131 | ``` 1132 | 1133 | #### 播放: 1134 | 1135 | ``` 1136 | # url 是上面的 1137 | bm -p url 1138 | ``` 1139 | 1140 | #### 4. 参考: 1141 | 1142 | > http://v2ex.com/t/77685 # 第9楼 1143 | 1144 | --- 1145 | 1146 | 1147 | ### music.163.com.py - 下载或播放高品质网易音乐(music.163.com) 1148 | 1149 | #### 1. 依赖 1150 | 1151 | ``` 1152 | wget 1153 | 1154 | python2-requests (https://github.com/kennethreitz/requests) 1155 | 1156 | python2-mutagen (https://code.google.com/p/mutagen/) 1157 | 1158 | mpv (http://mpv.io) 1159 | ``` 1160 | 1161 | #### 2. 使用说明 1162 | 1163 | **默认下载和播放高品质音乐,如果服务器没有高品质音乐则转到低品质音乐。** 1164 | 1165 | 默认执行下载,如要播放,加参数-p。 1166 | 1167 | 下载的MP3默认添加id3 tags,保存在当前目录下。 1168 | 1169 | 关于播放操作: 1170 | 1171 | > 在运行脚本的终端,输入1次Enter,关闭当前播放并播放下一个文件,连续输入2次Enter,关闭当前播放并退出。 1172 | 1173 | #### 3. 
用法 1174 | 1175 | nm 是music.163.com.py的马甲 (alias nm='python2 /path/to/music.163.com.py') 1176 | 1177 | ``` 1178 | # 下载专辑 1179 | nm http://music.163.com/#/album?id=18915 1180 | 1181 | # 下载单曲 1182 | nm http://music.163.com/#/song?id=186114 1183 | 1184 | # 下载歌单 1185 | nm http://music.163.com/#/playlist?id=12214308 1186 | 1187 | # 下载该艺术家所有专辑或 Top 50 歌曲 1188 | nm http://music.163.com/#/artist?id=6452 1189 | 1190 | # 下载DJ节目 1191 | nm http://music.163.com/#/dj?id=675051 1192 | 1193 | # 下载排行榜 1194 | nm http://music.163.com/#/discover/toplist?id=11641012 1195 | ``` 1196 | 1197 | #### 播放: 1198 | 1199 | ``` 1200 | # url 是上面的 1201 | nm -p url 1202 | ``` 1203 | 1204 | #### 4. 参考: 1205 | 1206 | > https://github.com/yanunon/NeteaseCloudMusic/wiki/%E7%BD%91%E6%98%93%E4%BA%91%E9%9F%B3%E4%B9%90API%E5%88%86%E6%9E%90 1207 | 1208 | 1209 | > http://s3.music.126.net/s/2/core.js 1210 | 1211 | --- 1212 | 1213 | 1214 | ### flv_cmd.py - 基于在线服务的视频解析 client - 支持下载、播放 1215 | 1216 | **!!!脚本已不再维护!!!** 1217 | 1218 | **请使用 youtube-dl or you-get** 1219 | 1220 | #### 1. 依赖 1221 | 1222 | ``` 1223 | wget 1224 | 1225 | python2-requests (https://github.com/kennethreitz/requests) 1226 | 1227 | mpv (http://mpv.io) 1228 | ``` 1229 | 1230 | #### 2. 使用说明 1231 | 1232 | ~~flvxz.com 视频解析~~ 不能用。 1233 | 1234 | flvgo.com 视频解析 1235 | 1236 | **不提供视频合并操作** 1237 | 1238 | #### 支持的网站: 1239 | 1240 | http://flvgo.com/sites 1241 | 1242 | 关于播放操作: 1243 | 1244 | > 在运行脚本的终端,输入1次Enter,关闭当前播放并播放下一个文件,连续输入2次Enter,关闭当前播放并退出。 1245 | 1246 | #### 3. 用法 1247 | 1248 | fl是flv_cmd.py的马甲 (alias fl='python2 /path/to/flv_cmd.py') 1249 | 1250 | #### 下载: 1251 | 1252 | ``` 1253 | fl http://v.youku.com/v_show/id_XNTI2Mzg4NjAw.html 1254 | fl http://www.tudou.com/albumplay/Lqfme5hSolM/tJ_Gl3POz7Y.html 1255 | ``` 1256 | 1257 | #### 播放: 1258 | 1259 | ``` 1260 | # url 是上面的 1261 | fl url -p 1262 | ``` 1263 | 1264 | #### 4. 
相关脚本: 1265 | 1266 | > https://github.com/soimort/you-get 1267 | 1268 | 1269 | > https://github.com/iambus/youku-lixian 1270 | 1271 | 1272 | > https://github.com/rg3/youtube-dl 1273 | 1274 | --- 1275 | 1276 | 1277 | ### tumblr.py - 下载某个tumblr.com的所有图片、视频、音频 1278 | 1279 | #### 1. 依赖 1280 | 1281 | ``` 1282 | wget 1283 | 1284 | mpv (http://mpv.io) 1285 | 1286 | python2-requests (https://github.com/kennethreitz/requests) 1287 | ``` 1288 | 1289 | #### 2. 使用说明 1290 | 1291 | * 使用前需用在 http://www.tumblr.com/oauth/apps 加入一个app,证实后得到api_key,再在源码中填入,完成后则可使用。 1292 | 1293 | * 或者用 http://www.tumblr.com/docs/en/api/v2 提供的api_key ( fuiKNFp9vQFvjLNvx4sUwti4Yb5yGutBN4Xh10LXZhhRKjWlV4 ) 1294 | 1295 | 默认开10个进程,如需改变用参数-p [num]。 1296 | 1297 | 下载的文件,保存在当前目录下。 1298 | 1299 | 默认下载图片(原图)。 1300 | 1301 | 支持连续下载,下载进度储存在下载文件夹内的 json.json。 1302 | 1303 | **正确退出程序使用 Ctrl-C** 1304 | **下载 更新的图片或其他 用 tumblr --update URL, 或 删除 json.json** 1305 | 1306 | #### 参数: 1307 | 1308 | ``` 1309 | -p PROCESSES, --processes PROCESSES 指定多进程数,默认为10个,最多为20个 eg: -p 20 1310 | -c, --check 尝试修复未下载成功的图片 1311 | -t TAG, --tag TAG 下载特定tag的图片, eg: -t beautiful 1312 | 1313 | -P, --play play with mpv 1314 | -A, --audio download audios 1315 | -V, --video download videos 1316 | -q, --quiet quiet 1317 | 1318 | --update 下载新发布的东西 1319 | --redownload 重新遍历所有的东西,如果有漏掉的东西则下载 1320 | --proxy protocol://address:port 设置代理 1321 | 1322 | -f OFFSET, --offset OFFSET 从第offset个开始,只对 -V 有用。 1323 | ``` 1324 | 1325 | #### 3. 
用法 1326 | 1327 | tm是tumblr.py的马甲 (alias tm='python2 /path/to/tumblr.py') 1328 | 1329 | ``` 1330 | # 下载图片 1331 | tm http://sosuperawesome.tumblr.com 1332 | tm http://sosuperawesome.tumblr.com -t beautiful 1333 | 1334 | # 下载图片(使用代理) 1335 | tm http://sosuperawesome.tumblr.com -x socks5://127.0.0.1:1024 1336 | tm http://sosuperawesome.tumblr.com -t beautiful -x socks5://127.0.0.1:1024 1337 | 1338 | # 下载单张图片 1339 | tm http://sosuperawesome.tumblr.com/post/121467716523/murosvur-on-etsy 1340 | 1341 | # 下载视频 1342 | tm url -V 1343 | tm url -V -f 42 1344 | tm url -V -t tag 1345 | 1346 | # 下载单个视频 1347 | tm url/post/1234567890 -V 1348 | 1349 | # 播放视频 1350 | tm url -VP 1351 | tm url -VP -f 42 1352 | 1353 | # 下载音频 1354 | tm url -A 1355 | tm url -A -f 42 1356 | tm url -A -t tag 1357 | 1358 | # 下载单个音频 1359 | tm url/post/1234567890 -A 1360 | 1361 | # 播放音频 1362 | tm url -AP 1363 | tm url -AP -f 42 1364 | 1365 | # 播放音频(quiet) 1366 | tm url -APq 1367 | 1368 | ``` 1369 | 1370 | --- 1371 | 1372 | 1373 | ### unzip.py - 解决linux下unzip乱码的问题 1374 | 1375 | #### 用法 1376 | 1377 | ``` 1378 | python2 unzip.py azipfile1.zip azipfile2.zip 1379 | python2 unzip.py azipfile.zip -s secret 1380 | # -s 密码 1381 | ``` 1382 | 1383 | 代码来自以下连接,我改了一点。 1384 | 1385 | > http://wangqige.com/the-solution-of-unzip-files-which-zip-under-windows/解决在Linux环境下解压zip的乱码问题 1386 | 1387 | --- 1388 | 1389 | 1390 | ### ed2k_search.py - 基于 donkey4u.com 的emule搜索 1391 | 1392 | #### 1. 依赖 1393 | 1394 | ``` 1395 | python2 1396 | ``` 1397 | 1398 | #### 2. 用法 1399 | 1400 | ed 是ed2k_search.py的马甲 (alias ed='python2 /path/to/ed2k_search.py') 1401 | 1402 | ``` 1403 | ed this is a keyword 1404 | or 1405 | ed "this is a keyword" 1406 | ``` 1407 | 1408 | --- 1409 | 1410 | 1411 | ### 91porn.py - 下载或播放91porn 1412 | 1413 | **警告: 18岁以下者,请自觉远离。** 1414 | 1415 | #### 1. 
依赖 1416 | 1417 | ``` 1418 | wget 1419 | 1420 | aria2 (~ 1.18) 1421 | 1422 | python2-requests (https://github.com/kennethreitz/requests) 1423 | 1424 | mpv (http://mpv.io) 1425 | ``` 1426 | 1427 | #### 2. 使用说明 1428 | 1429 | > youtube-dl 已支持91porn 1430 | 1431 | 没有解决每个ip *10个/day* 限制 1432 | 1433 | 下载工具默认为wget, 可用参数-a选用aria2 1434 | 1435 | 默认执行下载,如要播放媒体文件,加参数-p。 1436 | 1437 | 下载的文件,保存在当前目录下。 1438 | 1439 | 关于播放操作: 1440 | 1441 | > 在运行脚本的终端,输入1次Enter,关闭当前播放并播放下一个文件,连续输入2次Enter,关闭当前播放并退出。 1442 | 1443 | #### 3. 用法 1444 | 1445 | pn 是91porn.py的马甲 (alias pn='python2 /path/to/91porn.py') 1446 | 1447 | #### 下载: 1448 | 1449 | ``` 1450 | pn url # 91porn.com(或其镜像) 视频的url 1451 | ``` 1452 | 1453 | #### 播放: 1454 | 1455 | ``` 1456 | pn -p url 1457 | ``` 1458 | 1459 | 显示下载链接,但不下载: 1460 | 1461 | ``` 1462 | pn -u url 1463 | ``` 1464 | 1465 | #### 4. 参考 1466 | 1467 | > http://v2ex.com/t/110196 # 第16楼 1468 | 1469 | --- 1470 | 1471 | 1472 | ### ThunderLixianExporter.user.js - A fork of https://github.com/binux/ThunderLixianExporter 1473 | 1474 | **一个github.com/binux的迅雷离线导出脚本的fork。** 1475 | 1476 | 增加了mpv和mplayer的导出。 1477 | 1478 | 用法见: https://github.com/binux/ThunderLixianExporter 1479 | -------------------------------------------------------------------------------- /ThunderLixianExporter.user.js: -------------------------------------------------------------------------------- 1 | // ==UserScript== 2 | // @name ThunderLixianExporter 3 | // @namespace http://dynamic.cloud.vip.xunlei.com/ 4 | // @version 0.75 5 | // @description export thunder lixian url to aria2/wget 6 | // @match http://dynamic.cloud.vip.xunlei.com/user_task* 7 | // @match http://lixian.vip.xunlei.com/lx3_task.html* 8 | // @match http://jiayuan.xunlei.com/lxhome/lx3_task.html* 9 | // @run-at document-end 10 | // @copyright 2012+, Binux 11 | // @updateURL http://s.binux.me/TLE/master/ThunderLixianExporter.meta.js 12 | // ==/UserScript== 13 | 14 | function tle_wrapper() { 15 | // vim: set et sw=2 ts=2 sts=2 ff=unix fenc=utf8: 16 
| // Author: Binux 17 | // http://binux.me 18 | // Created on Fri 20 Jul 2012 11:43:22 AM CST 19 | 20 | TLE = {}; 21 | 22 | TLE.exporter = { 23 | '复制链接': function(todown) { 24 | //console.log(todown); 25 | var str = '"; 33 | $("#TLE_text_pop").tpl("TLE_text_tpl", {'title': '复制选中的链接 > 在新窗口中打开', 'content': str}).show().pop({ 34 | onHide: function() { $(document.body).click(); }, 35 | }); 36 | }, 37 | 'Aria2': function(todown) { 38 | //console.log(todown); 39 | var str = ""; 40 | $.each(todown.tasklist, function(n, task) { 41 | $.each(task.filelist, function(l, file) { 42 | if (!file.downurl) return; 43 | var filepath = TLE.safe_title(file.title); 44 | if (task.tasktype == 0 && task.filelist.length > 1) 45 | filepath = TLE.safe_title(task.taskname) + "/" + TLE.safe_title(file.title.replace(/\\+\*?/g,"/")); 46 | str += "aria2c -c -s10 -x10 --out "+TLE.escape_command(filepath)+" --header 'Cookie: gdriveid="+todown.gdriveid+";' '"+file.downurl+"'\n"; 47 | }); 48 | }); 49 | TLE.text_pop("aria2 download command", str); 50 | }, 51 | 'wget': function(todown) { 52 | //console.log(todown); 53 | var str = ""; 54 | $.each(todown.tasklist, function(n, task) { 55 | $.each(task.filelist, function(l, file) { 56 | if (!file.downurl) return; 57 | str += "wget -c -O "+TLE.escape_command(TLE.safe_title(file.title))+" --header 'Cookie: gdriveid="+todown.gdriveid+";' '"+file.downurl+"'\n"; 58 | }); 59 | }); 60 | TLE.text_pop("wget download command", str); 61 | }, 62 | 'mpv': function(todown) { 63 | //console.log(todown); 64 | var str = ""; 65 | $.each(todown.tasklist, function(n, task) { 66 | $.each(task.filelist, function(l, file) { 67 | if (!file.downurl) return; 68 | str += "mpv --really-quiet --cache 8140 --cache-default 8140 --http-header-fields 'Cookie: gdriveid="+todown.gdriveid+";' '"+file.downurl+"'\n"; 69 | }); 70 | }); 71 | TLE.text_pop("play with mpv", str); 72 | }, 73 | 'mplayer': function(todown) { 74 | //console.log(todown); 75 | var str = ""; 76 | $.each(todown.tasklist, 
function(n, task) { 77 | $.each(task.filelist, function(l, file) { 78 | if (!file.downurl) return; 79 | str += "mplayer -really-quiet -cache 8140 -http-header-fields 'Cookie: gdriveid="+todown.gdriveid+";' '"+file.downurl+"'\n"; 80 | }); 81 | }); 82 | TLE.text_pop("play with mplayer", str); 83 | }, 84 | "YAAW": function(todown) { 85 | if (TLE.getConfig("TLE_aria2_jsonrpc")) { 86 | show_tip("添加中...到YAAW界面查看是否添加成功"); 87 | var aria2 = new ARIA2(TLE.getConfig("TLE_aria2_jsonrpc")); 88 | $.each(todown.tasklist, function(n, task) { 89 | $.each(task.filelist, function(l, file) { 90 | if (!file.downurl) return; 91 | var filepath = TLE.safe_title(file.title); 92 | if (task.tasktype == 0 && task.filelist.length > 1) 93 | filepath = TLE.safe_title(task.taskname) + "/" + TLE.safe_title(file.title.replace(/\\+\*?/g,"/")); 94 | aria2.addUri(file.downurl, {out: filepath, header: 'Cookie: gdriveid='+todown.gdriveid}); 95 | }); 96 | }); 97 | hide_tip(); 98 | } else { 99 | show_tip("尚未设置Aria2 JSONRPC地址"); 100 | hide_tip(); 101 | }; 102 | }, 103 | 'Aria2导出': function(todown) { 104 | //console.log(todown); 105 | var str = ""; 106 | $.each(todown.tasklist, function(n, task) { 107 | $.each(task.filelist, function(l, file) { 108 | if (!file.downurl) return; 109 | var filepath = TLE.safe_title(file.title); 110 | if (task.tasktype == 0 && task.filelist.length > 1) 111 | filepath = TLE.safe_title(task.taskname) + "/" + TLE.safe_title(file.title.replace(/\\+\*?/g,"/")); 112 | str += file.downurl+'\r\n out='+filepath+'\r\n header=Cookie: gdriveid='+todown.gdriveid+'\r\n continue=true\r\n max-connection-per-server=5\r\n split=10\r\n parameterized-uri=true\r\n\r\n'; 113 | }); 114 | }); 115 | TLE.file_pop("Aria2导出文件下载", str, "aria2.down"); 116 | }, 117 | 'IDM导出': function(todown) { 118 | //console.log(todown); 119 | var str = ""; 120 | $.each(todown.tasklist, function(n, task) { 121 | $.each(task.filelist, function(l, file) { 122 | if (!file.downurl) return; 123 | str += 
'<\r\n'+TLE.url_rewrite(file.downurl, TLE.safe_title(file.title))+'\r\ncookie: gdriveid='+todown.gdriveid+'\r\n>\r\n' 124 | }); 125 | }); 126 | TLE.file_pop("IDM导出文件下载", str, "idm.ef2"); 127 | }, 128 | 'Orbit导出': function(todown) { 129 | //console.log(todown); 130 | var str = ""; 131 | $.each(todown.tasklist, function(n, task) { 132 | $.each(task.filelist, function(l, file) { 133 | if (!file.downurl) return; 134 | str += TLE.url_rewrite(file.downurl, TLE.safe_title(file.title))+'|'+TLE.safe_title(file.title.replace("|", "_"))+'||gdriveid='+todown.gdriveid+'\r\n' 135 | }); 136 | }); 137 | TLE.file_pop("Orbit导出文件下载", str, "orbit.olt"); 138 | }, 139 | 'eagleget': function(todown) { 140 | var ret = {tasks: []}; 141 | $.each(todown.tasklist, function(n, task) { 142 | $.each(task.filelist, function(l, file) { 143 | if (!file.downurl) return; 144 | ret.tasks.push({ 145 | cookie: 'gdriveid='+todown.gdriveid, 146 | fname: TLE.safe_title(file.title), 147 | url: TLE.url_rewrite(file.downurl, TLE.safe_title(file.title)) 148 | }); 149 | }); 150 | }); 151 | TLE.file_pop("Eagleget导出文件下载(test)", JSON.stringify(ret), "eagleget.eg"); 152 | }, 153 | }; 154 | 155 | (function(TLE) { 156 | function get_taskinfo(p) { 157 | var taskid = p.attr("taskid"); 158 | var info = {}; 159 | p.find("input").each(function(n, e) { 160 | var key = e.getAttribute("id").replace(taskid, ""); 161 | info[key] = e.getAttribute("value"); 162 | }); 163 | return info; 164 | }; 165 | 166 | function build_normal_taskinfo(info) { 167 | var taskinfo = { 168 | 'taskname': info.taskname || info.cloud_taskname, 169 | 'f_url': info.f_url, 170 | 'cid': info.dcid || info.cloud_cid, 171 | 'size': parseInt(info.ysfilesize), 172 | 'tasktype': info.d_tasktype, 173 | 'status': info.d_status, 174 | }; 175 | var filelist = []; 176 | filelist.push({ 177 | 'title': info.taskname || info.cloud_taskname, 178 | 'f_url': info.f_url, 179 | 'downurl': info.dl_url || info.cloud_dl_url, 180 | 'cid': info.dcid || info.cloud_cid, 181 | 
'gcid': "", 182 | 'size': parseInt(info.ysfilesize), 183 | }); 184 | taskinfo['filelist'] = filelist; 185 | 186 | return taskinfo; 187 | }; 188 | function build_bt_taskinfo(info, rdata) { 189 | var taskinfo = { 190 | 'taskname': info.taskname, 191 | 'f_url': info.f_url, 192 | 'cid': info.dcid, 193 | 'size': parseInt(info.ysfilesize), 194 | 'tasktype': info.d_tasktype, 195 | 'status': info.d_status, 196 | }; 197 | var filelist = []; 198 | $.each(rdata, function(n, e) { 199 | filelist.push({ 200 | 'title': e.title, 201 | 'f_url': e.url, 202 | 'downurl': e.downurl, 203 | 'cid': e.cid, 204 | 'gcid': e.gcid, 205 | 'size': parseInt(e.filesize), 206 | }); 207 | }); 208 | taskinfo['filelist'] = filelist; 209 | return taskinfo; 210 | }; 211 | 212 | TLE.safe_title = function safe_title(title) { 213 | return title.replace(/[\\\|\:\*\"\?\<\>]/g,"_"); 214 | }; 215 | 216 | TLE.down = function(_this, _do) { 217 | var p = $(_this).parents(".rw_list"); 218 | var info = get_taskinfo(p); 219 | console.log(info); 220 | 221 | if (info.d_tasktype == "0") { //bt task 222 | show_tip("载入中..."); 223 | $.getJSON(INTERFACE_URL+"/fill_bt_list?tid="+info.input+"&g_net="+G_section+"&uid="+G_USERID+"&callback=?", function(data) { 224 | hide_tip(); 225 | var todown = {}; 226 | todown.gdriveid = getCookie("gdriveid"); 227 | todown.tasklist = {}; 228 | todown.tasklist[info.input] = build_bt_taskinfo(info, data['Result'][info.input]); 229 | _do(todown); 230 | }); 231 | } else { 232 | var todown = {} 233 | todown.gdriveid = getCookie("gdriveid"); 234 | todown.tasklist = {}; 235 | todown.tasklist[info.input] = build_normal_taskinfo(info); 236 | _do(todown); 237 | }; 238 | }; 239 | 240 | TLE.batch_down = function(_this, _do) { 241 | var ck = document.getElementsByName("ck"); 242 | var bt_task_list = []; 243 | var normal_task_list = []; 244 | $.each(ck, function(n, e) { 245 | if (e.checked == false) return; 246 | 247 | var taskid = e.value; 248 | var d_status = $("#d_status"+taskid).val(); 249 | var 
d_tasktype = $("#d_tasktype"+taskid).val(); 250 | var d_flag = $("#dflag"+taskid).val(); 251 | if (d_flag != 4 && d_status == 2) { 252 | if (d_tasktype == 0) { 253 | bt_task_list.push(taskid); 254 | } else { 255 | normal_task_list.push(taskid); 256 | }; 257 | }; 258 | }); 259 | 260 | if (bt_task_list.length) { 261 | show_tip("载入中..."); 262 | $.getJSON(INTERFACE_URL+"/fill_bt_list?tid="+bt_task_list.join(",")+"&g_net="+G_section+"&uid="+G_USERID+"&callback=?", function(data) { 263 | hide_tip(); 264 | var todown = {}; 265 | todown.gdriveid = getCookie("gdriveid"); 266 | todown.tasklist = {}; 267 | $.each(data['Result'], function(n, e) { 268 | var info = get_taskinfo($("#tr_c"+n)); 269 | todown.tasklist[n] = build_bt_taskinfo(info, e); 270 | }); 271 | $.each(normal_task_list, function(n, e) { 272 | var info = get_taskinfo($("#tr_c"+e)); 273 | todown.tasklist[e] = build_normal_taskinfo(info); 274 | }); 275 | _do(todown); 276 | }); 277 | } else { 278 | var todown = {}; 279 | todown.gdriveid = getCookie("gdriveid"); 280 | todown.tasklist = {}; 281 | $.each(normal_task_list, function(n, e) { 282 | var info = get_taskinfo($("#tr_c"+e)); 283 | todown.tasklist[e] = build_normal_taskinfo(info); 284 | }); 285 | _do(todown); 286 | }; 287 | }; 288 | 289 | TLE.bt_down = function(_this, _do) { 290 | var ck = document.getElementsByName("bt_list_ck"); 291 | var files = []; 292 | $.each(ck, function(n, e) { 293 | if (e.checked == false) return; 294 | var fid = e.getAttribute("_i"); 295 | var file = { 296 | 'title': $("#bt_taskname"+fid).val(), 297 | 'url': $("#bturl"+fid).val(), 298 | 'downurl': $("#btdownurl"+fid).val(), 299 | 'cid': $("#btcid"+fid).val(), 300 | 'gcid': $("#btgcid"+fid).val(), 301 | 'filesize': $("#bt_filesize"+fid).val(), 302 | }; 303 | files.push(file); 304 | }); 305 | var taskid = $("#view_bt_taskid").val(); 306 | var info = get_taskinfo($("#tr_c"+taskid)); 307 | 308 | var todown = {}; 309 | todown.gdriveid = getCookie("gdriveid"); 310 | todown.tasklist = {}; 311 
| todown.tasklist[taskid] = build_bt_taskinfo(info, files); 312 | //console.log(todown); 313 | 314 | _do(todown); 315 | 316 | //console.log("bt_down"); 317 | }; 318 | 319 | TLE.bt_down_one = function(_this, _do) { 320 | var files = [] 321 | var fid = $(_this).parents(".rw_list").attr("i"); 322 | var file = { 323 | 'title': $("#bt_taskname"+fid).val(), 324 | 'url': $("#bturl"+fid).val(), 325 | 'downurl': $("#btdownurl"+fid).val(), 326 | 'cid': $("#btcid"+fid).val(), 327 | 'gcid': $("#btgcid"+fid).val(), 328 | 'filesize': $("#bt_filesize"+fid).val(), 329 | }; 330 | files.push(file); 331 | var taskid = $("#view_bt_taskid").val(); 332 | var info = get_taskinfo($("#tr_c"+taskid)); 333 | 334 | var todown = {}; 335 | todown.gdriveid = getCookie("gdriveid"); 336 | todown.tasklist = {}; 337 | todown.tasklist[taskid] = build_bt_taskinfo(info, files); 338 | //console.log(todown); 339 | 340 | _do(todown); 341 | 342 | //console.log("bt_down"); 343 | }; 344 | 345 | TLE.getbtn = function(_this) { 346 | $(_this).parents(".TLE_get_btnbox").find(".TLE_p_getbtn").toggle(); 347 | close_rightmenu_layer(); 348 | return false; 349 | }; 350 | 351 | TLE.text_pop = function(title, content) { 352 | content = $('
').text(content).html() 353 | content = '' 354 | $("#TLE_text_pop").tpl("TLE_text_tpl", {'title': title, 'content': content}).show().pop({ 355 | onHide: function() { $(document.body).click(); }, 356 | }); 357 | }; 358 | TLE.file_pop = function(title, content, filename) { 359 | var url = "data:text/html;charset=utf-8,"+encodeURIComponent(content); 360 | var content = '
' 361 | +'
' 362 | +'导出文件' 363 | +(isChrome ? '' : '(右键另存为'+filename+')') 364 | +'
' 365 | +'
' 366 | $("#TLE_text_pop").tpl("TLE_text_tpl", {'title': title, 'content': content}).show().pop({ 367 | onHide: function() { $(document.body).click(); }, 368 | }); 369 | }; 370 | TLE.window_pop = function(title, content) { 371 | $("#TLE_text_pop").tpl("TLE_text_tpl", {'title': title, 'content': content}).show().pop({ 372 | onHide: function() { $(document.body).click(); }, 373 | }); 374 | }; 375 | 376 | TLE.multiple_server_fix = function(url) { 377 | return "'"+url.replace("gdl", "'{gdl,dl.{f,g,h,i,twin}}'")+"'"; 378 | } 379 | 380 | function encode_utf8(s) { 381 | return unescape( encodeURIComponent( s ) ); 382 | }; 383 | function to_hex(num) { 384 | var s = num.toString(16); 385 | if (s.length == 1) 386 | return '0'+s; 387 | else 388 | return s; 389 | }; 390 | var thunder_filename_mask = [0x61, 0x31, 0xe4, 0x5f, 0x00, 0x00, 0x00, 0x00]; 391 | function thunder_filename_encode(filename) { 392 | var result = ["01", ]; 393 | $.each(encode_utf8(filename), function(i, n) { 394 | result.push(to_hex(n.charCodeAt(0)^thunder_filename_mask[i%8]).toUpperCase()) 395 | }); 396 | while (result.length % 8 != 1) { 397 | result.push(to_hex(thunder_filename_mask[(result.length-1)%8]).toUpperCase()); 398 | } 399 | return result.join(""); 400 | }; 401 | 402 | TLE.url_rewrite = function(url, filename) { 403 | url = url.replace(/&n=\w+/, "&n="+thunder_filename_encode(filename)); 404 | return url; 405 | }; 406 | 407 | var alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; 408 | TLE.escape_command = function(str) { 409 | var result = ""; 410 | for (var i = 0; i < str.length; i++) { 411 | if (alpha.indexOf(str[i]) == -1) 412 | result += "\\"+str[i]; 413 | else 414 | result += str[i]; 415 | } 416 | return result; 417 | }; 418 | 419 | //setting 420 | TLE.getConfig = function(key) { 421 | if (window.localStorage) { 422 | return window.localStorage.getItem(key) || ""; 423 | } else { 424 | return getCookie(key); 425 | } 426 | }; 427 | TLE.setConfig = function(key, value) { 
428 | if (window.localStorage) { 429 | window.localStorage.setItem(key, value); 430 | } else { 431 | setGdCookie(key, value, 86400*365); 432 | } 433 | }; 434 | //set default config 435 | if (TLE.getConfig("TLE_exporter") == "") { 436 | var exporters = []; 437 | for (var key in TLE.exporter) { 438 | exporters.push(key); 439 | }; 440 | TLE.setConfig("TLE_exporter", exporters.join("|")); 441 | }; 442 | 443 | function init() { 444 | //css 445 | $("head").append(''); 466 | //pop 467 | $("body").append(''); 468 | $("body").append(''); 469 | $("#TLE_text_tpl").text('
' 470 | +'
' 471 | +'

$[title]

' 472 | +'
' 473 | +'
' 474 | +'$[content]' 475 | +'
' 476 | +'关闭' 477 | +'
'); 478 | $("#setting_main_tpl").text($("#setting_main_tpl").text().replace(/(<\/div>\s+
)/, 479 | '
' 480 | +'

Thunder Lixian Exporter 设定

' 481 | +'
    ' 482 | +'
  • 启用以下导出器
  • ' 483 | +'
  • '+(function(){ 484 | var enabled_exporter = TLE.getConfig("TLE_exporter").split("|"); 485 | var str = ''; 486 | for (var name in TLE.exporter) { 487 | str += ''+name+''; 488 | } 489 | return str; 490 | })()+'
  • ' 491 | +'
  • Aria2 JSON-RPC Path
  • ' 492 | +'
  • Path:
  • ' 493 | +'
' 494 | +'$1')); 495 | var _set_notice_submit = set_notice_submit; 496 | set_notice_submit = function(f) { 497 | _set_notice_submit(f); 498 | var enabled_exporter = []; 499 | $(".TLE_setting_ck").each(function(n, e) { 500 | if (e.checked) enabled_exporter.push(e.name.replace(/^TLE_ck_/, "")); 501 | }); 502 | var config_str = (enabled_exporter.length == 0) ? "_" : enabled_exporter.join("|"); 503 | var jsonrpc_path = $("#TLE_aria2_jsonrpc").val(); 504 | if (TLE.getConfig("TLE_exporter") != config_str || TLE.getConfig("TLE_aria2_jsonrpc") != jsonrpc_path) { 505 | TLE.setConfig("TLE_exporter", config_str); 506 | TLE.setConfig("TLE_aria2_jsonrpc", jsonrpc_path); 507 | TS2.show('设置已生效',1); 508 | setTimeout(function(){ 509 | setting.hide(); 510 | location.reload(true); 511 | }, 1*1000); 512 | } 513 | }; 514 | 515 | function exporter_anchors(type) { 516 | var enabled_exporter = TLE.getConfig("TLE_exporter").split("|"); 517 | var str = ''; 518 | $.each(TLE.exporter, function(n, f) { 519 | if (enabled_exporter.indexOf(n) == -1) return; 520 | str+=(''+n+''); 521 | }); 522 | return str; 523 | } 524 | //down 525 | $(".rwbtn.ic_redownloca").each(function(n, e) { 526 | $(e).after('
' 527 | + '' 528 | + '取回本地' 529 | + '' 530 | + '' 531 | + '' 534 | + '
'); 535 | }); 536 | 537 | //batch_down 538 | $("#li_task_down,#li_task_download").after('批量导出') 539 | .parents(".main_link").append( 540 | ''); 543 | var _task_check_click = task_check_click; 544 | task_check_click = function() { 545 | _task_check_click(); 546 | if ($("#li_task_down,#li_task_download").hasClass("noit")) { 547 | $("#TLE_batch_down").addClass("noit").unbind("click"); 548 | } else { 549 | $("#TLE_batch_down").removeClass("noit").unbind("click").click(function() { 550 | $("#TLE_batch_getbtn").css("left", $("#TLE_batch_down").position().left); 551 | $("#TLE_batch_getbtn").toggle(); 552 | return false; 553 | }); 554 | }; 555 | //console.log("task_check_click called"); 556 | }; 557 | $('input[name=ck],input#ckbutton').click(task_check_click); 558 | 559 | //bt_down 560 | $("#view_bt_list_nav_tpl").text($("#view_bt_list_nav_tpl").text().replace('取回本地', 561 | '取回本地' 562 | +'批量导出' 563 | +'')); 566 | $("#view_bt_list_tpl").text($("#view_bt_list_tpl").text().replace('ic_redownloca" title="">取回本地', 567 | 'ic_redownloca" title="">取回本地' 568 | +'
' 569 | + '' 570 | + '取回本地' 571 | + '' 572 | + '' 573 | + '' 576 | + '
')); 577 | var _bt_view_nav = bt_view_nav; 578 | bt_view_nav = function() { 579 | _bt_view_nav(); 580 | if ($("#view_bt_list_nav_down").hasClass("noit")) { 581 | $("#TLE_bt_down").addClass("noit").unbind("click"); 582 | } else { 583 | $("#TLE_bt_down").removeClass("noit").unbind("click").click(function() { 584 | $("#TLE_bt_getbtn").css("left", $("#TLE_bt_down").position().left); 585 | $("#TLE_bt_getbtn").toggle(); 586 | return false; 587 | }); 588 | }; 589 | $("#TLE_bt_getbtn").hide(); 590 | //console.log("bt_view_nav called"); 591 | }; 592 | 593 | //close menu binding 594 | $(document.body).bind("click",function(){ 595 | $("div.TLE_p_getbtn, #TLE_batch_getbtn, #TLE_bt_getbtn").hide(); 596 | }); 597 | $("div.rw_list").click(function(e){ 598 | $("div.TLE_p_getbtn, #TLE_batch_getbtn, #TLE_bt_getbtn").hide(); 599 | }); 600 | $("div.TLE_get_btnbox").click(function(e){e.stopPropagation();}); 601 | }; 602 | 603 | init(); 604 | })(TLE); 605 | 606 | var ARIA2 = (function() { 607 | var jsonrpc_version = '2.0'; 608 | 609 | function get_auth(url) { 610 | return url.match(/^(?:(?![^:@]+:[^:@\/]*@)[^:\/?#.]+:)?(?:\/\/)?(?:([^:@]*(?::[^:@]*)?)?@)?/)[1]; 611 | }; 612 | 613 | function request(jsonrpc_path, method, params) { 614 | var request_obj = { 615 | jsonrpc: jsonrpc_version, 616 | method: method, 617 | id: (new Date()).getTime().toString(), 618 | }; 619 | if (params) request_obj['params'] = params; 620 | 621 | var xhr = new XMLHttpRequest(); 622 | var auth = get_auth(jsonrpc_path); 623 | jsonrpc_path = jsonrpc_path.replace(/^((?![^:@]+:[^:@\/]*@)[^:\/?#.]+:)?(\/\/)?(?:(?:[^:@]*(?::[^:@]*)?)?@)?(.*)/, '$1$2$3'); // auth string not allowed in url for firefox 624 | xhr.open("POST", jsonrpc_path+"?tm="+(new Date()).getTime().toString(), true); 625 | xhr.setRequestHeader("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8"); 626 | if (auth) xhr.setRequestHeader("Authorization", "Basic "+btoa(auth)); 627 | xhr.send(JSON.stringify(request_obj)); 628 | }; 629 | 630 | 
return function(jsonrpc_path) { 631 | this.jsonrpc_path = jsonrpc_path; 632 | this.addUri = function (uri, options) { 633 | request(this.jsonrpc_path, 'aria2.addUri', [[uri, ], options]); 634 | }; 635 | return this; 636 | } 637 | })(); 638 | } // end of wrapper 639 | 640 | function tle_lx3_wrapper() { 641 | seajs.use("jquery", function(){ 642 | // vim: set et sw=2 ts=2 sts=2 ff=unix fenc=utf8: 643 | // Author: Binux 644 | // http://binux.me 645 | // Created on 2013-12-27 23:00:34 646 | 647 | TLE = {}; 648 | 649 | TLE.exporter = { 650 | '复制链接': function(todown) { 651 | //console.log(todown); 652 | var str = '
    '; 653 | $.each(todown.tasklist, function(n, task) { 654 | $.each(task.filelist, function(l, file) { 655 | if (!file.downurl) return; 656 | str += '
  • '+file.title+'
  • '; 657 | }); 658 | }); 659 | str += "
"; 660 | TLE.window_pop('复制选中的链接 > 在新窗口中打开', str); 661 | }, 662 | 'Aria2': function(todown) { 663 | //console.log(todown); 664 | var str = ""; 665 | $.each(todown.tasklist, function(n, task) { 666 | $.each(task.filelist, function(l, file) { 667 | if (!file.downurl) return; 668 | var filepath = TLE.safe_title(file.title); 669 | if (task.tasktype === 0 && task.filelist.length > 1) 670 | filepath = TLE.safe_title(task.taskname) + "/" + TLE.safe_title(file.title.replace(/\\+\*?/g,"/")); 671 | str += "aria2c -c -s10 -x10 --out "+TLE.escape_command(filepath)+" --header 'Cookie: gdriveid="+todown.gdriveid+";' '"+file.downurl+"'\n"; 672 | }); 673 | }); 674 | TLE.text_pop("aria2 download command", str); 675 | }, 676 | 'wget': function(todown) { 677 | //console.log(todown); 678 | var str = ""; 679 | $.each(todown.tasklist, function(n, task) { 680 | $.each(task.filelist, function(l, file) { 681 | if (!file.downurl) return; 682 | str += "wget -c -O "+TLE.escape_command(TLE.safe_title(file.title))+" --header 'Cookie: gdriveid="+todown.gdriveid+";' '"+file.downurl+"'\n"; 683 | }); 684 | }); 685 | TLE.text_pop("wget download command", str); 686 | }, 687 | 'mpv': function(todown) { 688 | //console.log(todown); 689 | var str = ""; 690 | $.each(todown.tasklist, function(n, task) { 691 | $.each(task.filelist, function(l, file) { 692 | if (!file.downurl) return; 693 | str += "mpv --really-quiet --cache 8140 --cache-default 8140 --http-header-fields 'Cookie: gdriveid="+todown.gdriveid+";' '"+file.downurl+"'\n"; 694 | }); 695 | }); 696 | TLE.text_pop("play with mpv", str); 697 | }, 698 | 'mplayer': function(todown) { 699 | //console.log(todown); 700 | var str = ""; 701 | $.each(todown.tasklist, function(n, task) { 702 | $.each(task.filelist, function(l, file) { 703 | if (!file.downurl) return; 704 | str += "mplayer -really-quiet -cache 8140 -http-header-fields 'Cookie: gdriveid="+todown.gdriveid+";' '"+file.downurl+"'\n"; 705 | }); 706 | }); 707 | TLE.text_pop("play with mplayer", str); 
708 | }, 709 | "YAAW": function(todown) { 710 | if (TLE.getConfig("TLE_aria2_jsonrpc")) { 711 | TLE.tip("添加中...到YAAW界面查看是否添加成功"); 712 | var aria2 = new ARIA2(TLE.getConfig("TLE_aria2_jsonrpc")); 713 | $.each(todown.tasklist, function(n, task) { 714 | $.each(task.filelist, function(l, file) { 715 | if (!file.downurl) return; 716 | var filepath = TLE.safe_title(file.title); 717 | if (task.tasktype === 0 && task.filelist.length > 1) 718 | filepath = TLE.safe_title(task.taskname) + "/" + TLE.safe_title(file.title.replace(/\\+\*?/g,"/")); 719 | aria2.addUri(file.downurl, {out: filepath, header: 'Cookie: gdriveid='+todown.gdriveid}); 720 | }); 721 | }); 722 | TLE.hide_tip(); 723 | } else { 724 | TLE.tip("尚未设置Aria2 JSONRPC地址", 5); 725 | } 726 | }, 727 | 'Aria2导出': function(todown) { 728 | //console.log(todown); 729 | var str = ""; 730 | $.each(todown.tasklist, function(n, task) { 731 | $.each(task.filelist, function(l, file) { 732 | if (!file.downurl) return; 733 | var filepath = TLE.safe_title(file.title); 734 | if (task.tasktype === 0 && task.filelist.length > 1) 735 | filepath = TLE.safe_title(task.taskname) + "/" + TLE.safe_title(file.title.replace(/\\+\*?/g,"/")); 736 | str += file.downurl+'\r\n out='+filepath+'\r\n header=Cookie: gdriveid='+todown.gdriveid+'\r\n continue=true\r\n max-connection-per-server=5\r\n split=10\r\n parameterized-uri=true\r\n\r\n'; 737 | }); 738 | }); 739 | TLE.file_pop("Aria2导出文件下载", str, "aria2.down"); 740 | }, 741 | 'IDM导出': function(todown) { 742 | //console.log(todown); 743 | var str = ""; 744 | $.each(todown.tasklist, function(n, task) { 745 | $.each(task.filelist, function(l, file) { 746 | if (!file.downurl) return; 747 | str += '<\r\n'+TLE.url_rewrite(file.downurl, TLE.safe_title(file.title))+'\r\ncookie: gdriveid='+todown.gdriveid+'\r\n>\r\n'; 748 | }); 749 | }); 750 | TLE.file_pop("IDM导出文件下载", str, "idm.ef2"); 751 | }, 752 | 'Orbit导出': function(todown) { 753 | //console.log(todown); 754 | var str = ""; 755 | $.each(todown.tasklist, 
function(n, task) { 756 | $.each(task.filelist, function(l, file) { 757 | if (!file.downurl) return; 758 | str += TLE.url_rewrite(file.downurl, TLE.safe_title(file.title))+'|'+TLE.safe_title(file.title.replace("|", "_"))+'||gdriveid='+todown.gdriveid+'\r\n'; 759 | }); 760 | }); 761 | TLE.file_pop("Orbit导出文件下载", str, "orbit.olt"); 762 | }, 763 | 'eagleget导出': function(todown) { 764 | var ret = {tasks: []}; 765 | $.each(todown.tasklist, function(n, task) { 766 | $.each(task.filelist, function(l, file) { 767 | if (!file.downurl) return; 768 | ret.tasks.push({ 769 | cookie: 'gdriveid='+todown.gdriveid, 770 | fname: TLE.safe_title(file.title), 771 | url: TLE.url_rewrite(file.downurl, TLE.safe_title(file.title)) 772 | }); 773 | }); 774 | }); 775 | TLE.file_pop("Eagleget导出文件下载(test)", JSON.stringify(ret), "eagleget.eg"); 776 | }, 777 | }; 778 | 779 | (function(TLE) { 780 | function get_taskinfo(taskid) { 781 | return { 782 | taskname: $("#tr_c"+taskid+" .title .w_title").attr("title"), 783 | title: $("#tr_c"+taskid+" .title .w_title").attr("title"), 784 | f_url: $("#f_url"+taskid).val(), 785 | downurl: $("#dl_url"+taskid).val(), 786 | cid: $("#dcid"+taskid).val(), 787 | gcid: $("#gcid"+taskid).val(), 788 | size: parseInt($("#ysfilesize"+taskid).val()), 789 | tasktype: parseInt($("#d_tasktype"+taskid).val()), 790 | status: $("#d_status"+taskid).val(), 791 | }; 792 | } 793 | function get_bt_taskinfo(taskid) { 794 | return { 795 | title: $("#bt_taskname"+taskid).val(), 796 | f_url: $("#bturl"+taskid).val(), 797 | downurl: $("#btdownurl"+taskid).val(), 798 | cid: $("#btcid"+taskid).val(), 799 | gcid: $("#btgcid"+taskid).val(), 800 | size: $("#bt_filesize"+taskid).val(), 801 | status: $("#btd_status"+taskid).val(), 802 | }; 803 | } 804 | 805 | function build_bt_taskinfo(info, rdata) { 806 | var taskinfo = { 807 | 'taskname': info.taskname, 808 | 'f_url': info.f_url, 809 | 'cid': info.dcid, 810 | 'size': parseInt(info.ysfilesize), 811 | 'tasktype': parseInt(info.d_tasktype), 
812 | 'status': info.d_status, 813 | }; 814 | var filelist = []; 815 | $.each(rdata, function(n, e) { 816 | filelist.push({ 817 | 'title': e.title, 818 | 'f_url': e.url, 819 | 'downurl': e.downurl, 820 | 'cid': e.cid, 821 | 'gcid': e.gcid, 822 | 'size': parseInt(e.filesize), 823 | }); 824 | }); 825 | taskinfo['filelist'] = filelist; 826 | return taskinfo; 827 | }; 828 | 829 | TLE.safe_title = function safe_title(title) { 830 | return title.replace(/[\\\|\:\*\"\?\<\>]/g,"_"); 831 | }; 832 | 833 | TLE.bt_down = function(_this, _do) { 834 | var ck = document.getElementsByName("bt_list_ck"); 835 | var files = []; 836 | $.each(ck, function(n, e) { 837 | if (e.checked == false) return; 838 | var fid = e.getAttribute("_i"); 839 | var file = { 840 | 'title': $("#bt_taskname"+fid).val(), 841 | 'url': $("#bturl"+fid).val(), 842 | 'downurl': $("#btdownurl"+fid).val(), 843 | 'cid': $("#btcid"+fid).val(), 844 | 'gcid': $("#btgcid"+fid).val(), 845 | 'filesize': $("#bt_filesize"+fid).val(), 846 | }; 847 | files.push(file); 848 | }); 849 | var taskid = $("#view_bt_taskid").val(); 850 | var info = get_taskinfo($("#tr_c"+taskid)); 851 | 852 | var todown = {}; 853 | todown.gdriveid = getCookie("gdriveid") || $("#cok").val(); 854 | todown.tasklist = {}; 855 | todown.tasklist[taskid] = build_bt_taskinfo(info, files); 856 | //console.log(todown); 857 | 858 | _do(todown); 859 | 860 | //console.log("bt_down"); 861 | }; 862 | 863 | TLE.text_pop = function(title, content) { 864 | content = $('
').text(content).html() 865 | content = '' 866 | $("#TLE_text_pop").tpl("TLE_text_tpl", {'title': title, 'content': content}).show().pop({ 867 | //onHide: function() { $(document.body).click(); }, 868 | }); 869 | }; 870 | TLE.file_pop = function(title, content, filename) { 871 | var url = "data:text/html;charset=utf-8,"+encodeURIComponent(content); 872 | if (isChrome) { 873 | $('').appendTo('body').get(0).click(); 874 | } else { 875 | var content = '
' 876 | +'
' 877 | +'导出文件' 878 | +(isChrome ? '' : '(右键另存为'+filename+')') 879 | +'
' 880 | +'
' 881 | $("#TLE_text_pop").tpl("TLE_text_tpl", {'title': title, 'content': content}).show().pop({ 882 | //onHide: function() { $(document.body).click(); }, 883 | }); 884 | } 885 | }; 886 | TLE.window_pop = function(title, content) { 887 | $("#TLE_text_pop").tpl("TLE_text_tpl", {'title': title, 'content': content}).show().pop({ 888 | //onHide: function() { $(document.body).click(); }, 889 | }); 890 | }; 891 | TLE.tip = function(content, time) { 892 | TS2.show(content, time); 893 | }; 894 | TLE.hide_tip = function() { 895 | TS2.hide(); 896 | }; 897 | 898 | TLE.multiple_server_fix = function(url) { 899 | return "'"+url.replace("gdl", "'{gdl,dl.{f,g,h,i,twin}}'")+"'"; 900 | } 901 | 902 | function encode_utf8(s) { 903 | return unescape( encodeURIComponent( s ) ); 904 | }; 905 | function to_hex(num) { 906 | var s = num.toString(16); 907 | if (s.length == 1) 908 | return '0'+s; 909 | else 910 | return s; 911 | }; 912 | var thunder_filename_mask = [0x61, 0x31, 0xe4, 0x5f, 0x00, 0x00, 0x00, 0x00]; 913 | function thunder_filename_encode(filename) { 914 | var result = ["01", ]; 915 | $.each(encode_utf8(filename), function(i, n) { 916 | result.push(to_hex(n.charCodeAt(0)^thunder_filename_mask[i%8]).toUpperCase()) 917 | }); 918 | while (result.length % 8 != 1) { 919 | result.push(to_hex(thunder_filename_mask[(result.length-1)%8]).toUpperCase()); 920 | } 921 | return result.join(""); 922 | }; 923 | 924 | TLE.url_rewrite = function(url, filename) { 925 | url = url.replace(/&n=\w+/, "&n="+thunder_filename_encode(filename)); 926 | return url; 927 | }; 928 | 929 | var alpha = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"; 930 | TLE.escape_command = function(str) { 931 | var result = ""; 932 | for (var i = 0; i < str.length; i++) { 933 | if (alpha.indexOf(str[i]) == -1) 934 | result += "\\"+str[i]; 935 | else 936 | result += str[i]; 937 | } 938 | return result; 939 | }; 940 | 941 | //setting 942 | TLE.getConfig = function(key) { 943 | if (window.localStorage) { 944 
| return window.localStorage.getItem(key) || ""; 945 | } else { 946 | return getCookie(key); 947 | } 948 | }; 949 | TLE.setConfig = function(key, value) { 950 | if (window.localStorage) { 951 | window.localStorage.setItem(key, value); 952 | } else { 953 | setGdCookie(key, value, 86400*365); 954 | } 955 | }; 956 | //set default config 957 | if (TLE.getConfig("TLE_exporter") == "") { 958 | var exporters = []; 959 | for (var key in TLE.exporter) { 960 | exporters.push(key); 961 | }; 962 | TLE.setConfig("TLE_exporter", exporters.join("|")); 963 | }; 964 | 965 | function init() { 966 | //css 967 | $("head").append(''); 973 | //pop 974 | $("body").append(''); 975 | $("body").append(''); 976 | $("#TLE_text_tpl").text('
' 977 | +'
' 978 | +'

$[title]

' 979 | +'
' 980 | +'
' 981 | +'$[content]' 982 | +'
' 983 | +'关闭浮层' 984 | +'
'); 985 | //setting 986 | $("#view_down_bar ul").prepend('
  • TLE设置
  • '); 987 | $("#TLE_setting_anchor").on("click", function() { 988 | var content = '
      ' 989 | +'
    • 启用以下导出器
    • ' 990 | +'
    • '+(function(){ 991 | var enabled_exporter = TLE.getConfig("TLE_exporter").split("|"); 992 | var str = ''; 993 | for (var name in TLE.exporter) { 994 | str += ''+name+''; 995 | } 996 | return str; 997 | })()+'
    • ' 998 | +'
    • Aria2 JSON-RPC Path
    • ' 999 | +'
    • Path:
    • ' 1000 | +'
    ' 1001 | +'
    ' 1002 | +'
    ' 1003 | +''; 1004 | $("#TLE_text_pop").tpl("TLE_text_tpl", {title: "Thunder Lixian Exporter 设定", content: content}).show().pop(); 1005 | $("#TLE_setting_ok").on("click", function() { 1006 | var enabled_exporter = []; 1007 | $(".TLE_setting_ck").each(function(n, e) { 1008 | if (e.checked) enabled_exporter.push(e.name.replace(/^TLE_ck_/, "")); 1009 | }); 1010 | var config_str = (enabled_exporter.length == 0) ? "_" : enabled_exporter.join("|"); 1011 | var jsonrpc_path = $("#TLE_aria2_jsonrpc").val(); 1012 | if (TLE.getConfig("TLE_exporter") != config_str || TLE.getConfig("TLE_aria2_jsonrpc") != jsonrpc_path) { 1013 | TLE.setConfig("TLE_exporter", config_str); 1014 | TLE.setConfig("TLE_aria2_jsonrpc", jsonrpc_path); 1015 | } 1016 | $("a.pop_close:visible").click(); 1017 | TLE.tip("配置已保存", 5); 1018 | }); 1019 | }); 1020 | //download binding 1021 | function exporter_anchors() { 1022 | var enabled_exporter = TLE.getConfig("TLE_exporter").split("|"); 1023 | var str = ''; 1024 | $.each(TLE.exporter, function(n, f) { 1025 | if (enabled_exporter.indexOf(n) == -1) return; 1026 | str+=('
  • '+n+'
  • '); 1027 | }); 1028 | return str; 1029 | } 1030 | function show_exporter_selector() { 1031 | $("#TLE_text_pop").tpl("TLE_text_tpl", {title: "您正在使用Thunder Lixian Exporter", 1032 | content: ''}).pop(); 1035 | } 1036 | 1037 | TLE.todown = {}; 1038 | window.thunder_download = function(taskid, type) { 1039 | TLE.todown = {}; 1040 | TLE.todown.gdriveid = getCookie("gdriveid") || $("#cok").val(); 1041 | if (type === 1) { 1042 | // bt_down_one 1043 | var taskinfo = { 1044 | taskname: $("#bt_info_list .title .w").text(), 1045 | f_url: null, 1046 | cid: null, 1047 | size: null, 1048 | tasktype: 0, 1049 | status: 2, 1050 | } 1051 | var filelist = []; 1052 | filelist.push(get_bt_taskinfo(taskid)); 1053 | taskinfo['filelist'] = filelist; 1054 | TLE.todown.tasklist = {}; 1055 | TLE.todown.tasklist['0'] = taskinfo; 1056 | } else { 1057 | // down 1058 | var taskinfo = get_taskinfo(taskid); 1059 | var filelist = []; 1060 | filelist.push(get_taskinfo(taskid)); 1061 | taskinfo['filelist'] = filelist; 1062 | TLE.todown.tasklist = {}; 1063 | TLE.todown.tasklist[taskid] = taskinfo; 1064 | } 1065 | show_exporter_selector(); 1066 | } 1067 | window.bt_task_down = function(cid, taskid) { 1068 | // bt_down 1069 | batch_down_all_f([taskid, ]); 1070 | } 1071 | window.batch_down_all_f = function(taskids) { 1072 | // batch_down 1073 | if (!taskids) { 1074 | taskids = []; 1075 | $("span[name=ck][checked]").each(function(n, e) { 1076 | taskids.push($(e).attr("value")); 1077 | }); 1078 | } 1079 | 1080 | var bt_task_list = [], normal_task_list = []; 1081 | $.each(taskids, function(n, taskid) { 1082 | var d_status = $("#d_status"+taskid).val(); 1083 | var d_tasktype = parseInt($("#d_tasktype"+taskid).val()); 1084 | var d_flag = $("#dflag"+taskid).val(); 1085 | if (d_flag != 4 && d_status == 2) { 1086 | if (d_tasktype == 0) { 1087 | bt_task_list.push(taskid); 1088 | } else { 1089 | normal_task_list.push(taskid); 1090 | }; 1091 | }; 1092 | }); 1093 | 1094 | if (bt_task_list.length) { 1095 | 
TLE.tip("载入中..."); 1096 | $.getJSON(INTERFACE_URL+"/fill_bt_list?tid="+bt_task_list.join(",")+"&g_net="+G_section+"&uid="+G_USERID+"&callback=?", function(data) { 1097 | TLE.hide_tip(); 1098 | var todown = {}; 1099 | todown.gdriveid = getCookie("gdriveid") || $("#cok").val(); 1100 | todown.tasklist = {}; 1101 | $.each(data['Result'], function(n, e) { 1102 | var taskinfo = get_taskinfo(n); 1103 | var filelist = []; 1104 | $.each(e, function(n, e) { 1105 | filelist.push({ 1106 | title: e.title, 1107 | f_url: e.url, 1108 | downurl: e.downurl, 1109 | cid: e.cid, 1110 | gcid: e.gcid, 1111 | size: parseInt(e.filesize), 1112 | }); 1113 | }); 1114 | taskinfo.filelist = filelist; 1115 | todown.tasklist[n] = taskinfo; 1116 | }); 1117 | $.each(normal_task_list, function(n, e) { 1118 | var taskinfo = get_taskinfo(e); 1119 | taskinfo['filelist'] = taskinfo; 1120 | todown.tasklist[e] = taskinfo; 1121 | }); 1122 | TLE.todown = todown; 1123 | show_exporter_selector(); 1124 | }); 1125 | } else { 1126 | var todown = {}; 1127 | todown.gdriveid = getCookie("gdriveid") || $("#cok").val(); 1128 | todown.tasklist = {}; 1129 | $.each(normal_task_list, function(n, e) { 1130 | var taskinfo = get_taskinfo(e); 1131 | taskinfo['filelist'] = taskinfo; 1132 | todown.tasklist[e] = taskinfo; 1133 | }); 1134 | TLE.todown = todown; 1135 | show_exporter_selector(); 1136 | }; 1137 | }; 1138 | window.batch_down_bt = function() { 1139 | var taskids = []; 1140 | $("span[name=bt_list_ck][checked]").each(function(n, e) { 1141 | var taskid = $(e).attr("value"); 1142 | if ($("#btd_status"+taskid).val() == 2) 1143 | taskids.push(taskid); 1144 | }); 1145 | 1146 | TLE.todown = {}; 1147 | TLE.todown.gdriveid = getCookie("gdriveid") || $("#cok").val(); 1148 | var taskinfo = { 1149 | taskname: $("#bt_info_list .title .w").text(), 1150 | f_url: null, 1151 | cid: null, 1152 | size: null, 1153 | tasktype: 0, 1154 | status: 2, 1155 | } 1156 | var filelist = []; 1157 | $.each(taskids, function(n, e) { 1158 | 
filelist.push(get_bt_taskinfo(e)); 1159 | }); 1160 | taskinfo['filelist'] = filelist; 1161 | TLE.todown.tasklist = {}; 1162 | TLE.todown.tasklist['0'] = taskinfo; 1163 | show_exporter_selector(); 1164 | } 1165 | } 1166 | 1167 | init(); 1168 | })(TLE); 1169 | 1170 | var ARIA2 = (function() { 1171 | var jsonrpc_version = '2.0'; 1172 | 1173 | function get_auth(url) { 1174 | return url.match(/^(?:(?![^:@]+:[^:@\/]*@)[^:\/?#.]+:)?(?:\/\/)?(?:([^:@]*(?::[^:@]*)?)?@)?/)[1]; 1175 | }; 1176 | 1177 | function request(jsonrpc_path, method, params) { 1178 | var request_obj = { 1179 | jsonrpc: jsonrpc_version, 1180 | method: method, 1181 | id: (new Date()).getTime().toString(), 1182 | }; 1183 | if (params) request_obj['params'] = params; 1184 | 1185 | var xhr = new XMLHttpRequest(); 1186 | var auth = get_auth(jsonrpc_path); 1187 | jsonrpc_path = jsonrpc_path.replace(/^((?![^:@]+:[^:@\/]*@)[^:\/?#.]+:)?(\/\/)?(?:(?:[^:@]*(?::[^:@]*)?)?@)?(.*)/, '$1$2$3'); // auth string not allowed in url for firefox 1188 | xhr.open("POST", jsonrpc_path+"?tm="+(new Date()).getTime().toString(), false); 1189 | xhr.setRequestHeader("Content-Type", "application/x-www-form-urlencoded; charset=UTF-8"); 1190 | if (auth) xhr.setRequestHeader("Authorization", "Basic "+btoa(auth)); 1191 | xhr.send(JSON.stringify(request_obj)); 1192 | }; 1193 | 1194 | return function(jsonrpc_path) { 1195 | this.jsonrpc_path = jsonrpc_path; 1196 | this.addUri = function (uri, options) { 1197 | request(this.jsonrpc_path, 'aria2.addUri', [[uri, ], options]); 1198 | }; 1199 | return this; 1200 | } 1201 | })(); 1202 | }); // end of seajs.use 1203 | } // end of wrapper 1204 | 1205 | function onload(func) { 1206 | if (document.readyState === "complete") { 1207 | func(); 1208 | } else { 1209 | window.addEventListener('load', func); 1210 | } 1211 | } 1212 | onload(function(){ 1213 | var script = document.createElement('script'); 1214 | script.id = "TLE_script"; 1215 | if (location.host == "dynamic.cloud.vip.xunlei.com") { 1216 | 
script.appendChild(document.createTextNode('('+ tle_wrapper +')();')); 1217 | } else if (location.host == "lixian.vip.xunlei.com" || location.host == "jiayuan.xunlei.com") { 1218 | script.appendChild(document.createTextNode('('+ tle_lx3_wrapper +')();')); 1219 | } 1220 | (document.body || document.head || document.documentElement).appendChild(script); 1221 | }); 1222 | -------------------------------------------------------------------------------- /bt.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # vim: set fileencoding=utf8 3 | 4 | import bencode 5 | import os 6 | import sys 7 | import re 8 | from hashlib import sha1 9 | import base64 10 | import requests 11 | import urlparse 12 | import argparse 13 | 14 | s = '\x1b[%d;%dm%s\x1b[0m' # terminual color template 15 | letters = [i for i in '.abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' \ 16 | + '0123456789'] 17 | 18 | ############################################################ 19 | headers = { 20 | "Connection": "keep-alive", 21 | "Accept":"text/html,application/xhtml+xml,\ 22 | application/xml;q=0.9,image/webp,*/*;q=0.8", 23 | "Accept-Encoding":"gzip,deflate,sdch", 24 | "Accept-Language":"en-US,en;q=0.8,zh-CN;\ 25 | q=0.6,zh;q=0.4,zh-TW;q=0.2", 26 | "User-Agent":"Mozilla/5.0 (X11; Linux x86_64) \ 27 | AppleWebKit/537.36 (KHTML, like Gecko) \ 28 | Chrome/40.0.2214.91 Safari/537.36" 29 | } 30 | 31 | ss = requests.session() 32 | ss.headers.update(headers) 33 | 34 | def save_img(url, ext): 35 | path = os.path.join(os.path.expanduser('~'), 'vcode.%s' % ext) 36 | with open(path, 'w') as g: 37 | data = requests.get(url).content 38 | g.write(data) 39 | print " ++ 验证码已保存至", s % (1, 97, path) 40 | input_code = raw_input(s % (2, 92, " 输入验证码: ")) 41 | return input_code 42 | 43 | class Bt(object): 44 | def transfer(self, string, tpath, foo=None, bar=None): 45 | self.dir_dict = {} 46 | self.sub_dir_index = 0 47 | 48 | dstring = bencode.bdecode(string) 
49 | files = [] 50 | file_index = 0 51 | 52 | ## change files' name 53 | if dstring['info'].get('files'): 54 | for fl in dstring['info']['files']: 55 | filename = fl['path'][-1] 56 | if args.type_ == 'n': 57 | newfilename = re.sub(foo, bar, filename, re.I) \ 58 | if foo and bar else filename 59 | if filename != newfilename: 60 | print filename, s % (1, 92, '==>'), newfilename 61 | path = [self._get_sub_dir_index(i) \ 62 | for i in fl['path'][:-1]] + [newfilename] 63 | else: 64 | ext = os.path.splitext(filename)[-1] 65 | ext = self._check_ext(ext) 66 | path = [self._get_sub_dir_index(i) \ 67 | for i in fl['path'][:-1]] \ 68 | + ['%s%s' % (file_index, ext)] 69 | file_index += 1 70 | fl['path'] = path 71 | 72 | elif args.type_ == 'be64': 73 | fn, ext = os.path.splitext(filename) 74 | ext = self._check_ext(ext) 75 | tfn = '/'.join(fl['path'][:-1] + [fn]) 76 | e_fn = base64.urlsafe_b64encode(tfn) 77 | fl['path'] = [e_fn + '.base64' + ext] 78 | 79 | for item in fl.keys(): 80 | #if item not in ['path', 'length', 'filehash', 'ed2k']: 81 | if item not in ['path', 'length', 'filehash']: 82 | del fl[item] 83 | 84 | files.append(fl) 85 | dstring['info']['files'] = files 86 | 87 | ## change top directory 88 | for i in dstring['info'].keys(): 89 | if i not in ['files', 'piece length', 'pieces', 'name', 'length']: 90 | del dstring['info'][i] 91 | elif 'name' in i: 92 | if args.name: 93 | dstring['info'][i] = args.name 94 | 95 | ## delete comment and creator 96 | for i in dstring.keys(): 97 | if i not in ['creation date', 'announce', 'info', 'encoding']: 98 | del dstring[i] 99 | 100 | c = bencode.bencode(dstring) 101 | with open(tpath, 'w') as g: 102 | g.write(c) 103 | 104 | def _get_sub_dir_index(self, dir_): 105 | if not self.dir_dict.get(dir_): 106 | self.dir_dict[dir_] = str(self.sub_dir_index) 107 | self.sub_dir_index += 1 108 | return self.dir_dict[dir_] 109 | else: 110 | return self.dir_dict[dir_] 111 | 112 | def _check_ext(self, ext): 113 | if len(ext) > 4: 114 | return '' 
115 | 116 | for e in ext: 117 | if e not in letters: 118 | return '' 119 | 120 | return ext 121 | 122 | def get_torrent(self, hh): 123 | print s % (1, 93, '\n ++ get torrent from web') 124 | 125 | def do(url, data=None, timeout=None): 126 | try: 127 | proxies = {'http': args.proxy} if args.proxy else None 128 | r = ss.get(url, proxies=proxies, timeout=timeout) 129 | cnt = r.content 130 | if r.ok and cnt and '' not in cnt \ 131 | and '4:name' in cnt: 132 | print s % (1, 92, ' √ get torrent.') 133 | return cnt 134 | else: 135 | print s % (1, 91, ' × not get.') 136 | return None 137 | except: 138 | return None 139 | 140 | ## xunlei 141 | print s % (1, 94, ' >> try:'), 'bt.box.n0808.com' 142 | url = 'http://bt.box.n0808.com/%s/%s/%s.torrent' \ 143 | % (hh[:2], hh[-2:], hh) 144 | ss.headers['Referer'] = 'http://bt.box.n0808.com' 145 | result = do(url) 146 | if result: return result 147 | 148 | ## https://torrage.com 149 | if ss.headers.get('Referer'): del ss.headers['Referer'] 150 | print s % (1, 94, ' >> try:'), 'torrage.com' 151 | url = 'http://torrage.com/torrent/%s.torrent' % hh 152 | try: 153 | result = do(url) 154 | if result: return result 155 | except: 156 | pass 157 | 158 | ## http://btcache.me 159 | if ss.headers.get('Referer'): del ss.headers['Referer'] 160 | print s % (1, 94, ' >> try:'), 'btcache.me' 161 | url = 'http://btcache.me/torrent/%s' % hh 162 | r = ss.get(url) 163 | key = re.search(r'name="key" value="(.+?)"', r.content) 164 | if key: 165 | url = 'http://btcache.me/captcha' 166 | vcode = save_img(url, 'png') 167 | data = { 168 | "key": key.group(1), 169 | "captcha": vcode 170 | } 171 | ss.headers['Referer'] = url 172 | url = 'http://btcache.me/download' 173 | result = do(url, data=data) 174 | if result: return result 175 | else: 176 | print s % (1, 91, ' × not get.') 177 | 178 | ## torrent stores 179 | if ss.headers.get('Referer'): del ss.headers['Referer'] 180 | urls = [ 181 | #'http://www.sobt.org/Tool/downbt?info=%s', 182 | 
'http://www.win8down.com/url.php?hash=%s&name=name', 183 | #'http://www.31bt.com/Torrent/%s', 184 | 'http://178.73.198.210/torrent/%s', 185 | 'http://zoink.it/torrent/%s.torrent', 186 | 'http://torcache.net/torrent/%s.torrent', 187 | 'http://torrentproject.se/torrent/%s.torrent', 188 | 'http://istoretor.com/fdown.php?hash=%s', 189 | 'http://torrentbox.sx/torrent/%s', 190 | 'http://www.torrenthound.com/torrent/%s', 191 | 'http://www.silvertorrent.org/download.php?id=%s', 192 | ] 193 | for url in urls: 194 | print s % (1, 94, ' >> try:'), urlparse.urlparse(url).hostname 195 | url = url % hh 196 | try: 197 | result = do(url) 198 | if result: return result 199 | except: 200 | print s % (1, 91, ' !! Error at connection') 201 | 202 | ## with Vuze 203 | #print s % (1, 94, ' >> try:'), 'magnet.vuze.com' 204 | #if ss.headers.get('Referer'): del ss.headers['Referer'] 205 | #chh = base64.b32encode(binascii.unhexlify(hh)) 206 | #url = 'http://magnet.vuze.com/magnetLookup?hash=%s' % chh 207 | #result = do(url) 208 | #if result: return result 209 | 210 | return False 211 | 212 | def magnet2torrent(self, urls, dir_): 213 | for url in urls: 214 | hh = re.search(r'urn:btih:(\w+)', url) 215 | if hh: 216 | hh = hh.group(1).upper() 217 | else: 218 | print s % (1, 91, ' !! magnet is wrong.'), url 219 | continue 220 | string = self.get_torrent(hh) 221 | if string: 222 | tpath = os.path.join(dir_, hh + '.torrent') 223 | print s % (1, 97, ' ++ magnet to torrent:'), \ 224 | 'magnet:?xt=urn:btih:%s' % hh 225 | with open(tpath, 'w') as g: 226 | g.write(string) 227 | else: 228 | print s % (1, 91, ' !! Can\'t get torrent from web.'), url 229 | 230 | def torrent2magnet(self, paths): 231 | def trans(tpath): 232 | if tpath.lower().endswith('torrent'): 233 | string = open(tpath).read() 234 | try: 235 | dd = bencode.bdecode(string) 236 | except Exception as e: 237 | print s % (1, 91, ' !! 
torrent is wrong:'), e 238 | return None 239 | info = bencode.bencode(dd['info']) 240 | hh = sha1(info).hexdigest() 241 | print '# %s' % tpath 242 | print 'magnet:?xt=urn:btih:%s' % hh, '\n' 243 | 244 | for path in paths: 245 | if os.path.exists(path): 246 | if os.path.isdir(path): 247 | for a, b, c in os.walk(path): 248 | for i in c: 249 | tpath = os.path.join(a, i) 250 | trans(tpath) 251 | elif os.path.isfile(path): 252 | tpath = path 253 | trans(tpath) 254 | else: 255 | print s % (1, 91, ' !! file doesn\'t existed'), \ 256 | s % (1, 93, '--'), path 257 | 258 | def change(self, ups, dir_, foo=None, bar=None): 259 | for up in ups: 260 | path = up 261 | if path.startswith('magnet:'): 262 | hh = re.search(r'urn:btih:(\w+)', path) 263 | if hh: 264 | hh = hh.group(1).upper() 265 | else: 266 | print s % (1, 91, ' !! magnet is wrong.'), path 267 | string = self.get_torrent(hh) 268 | if string: 269 | tpath = os.path.join(dir_, hh + '.torrent') 270 | print s % (1, 97, ' ++ transfer:'), \ 271 | 'magnet:?xt=urn:btih:%s' % hh 272 | self.transfer(string, tpath, foo=foo, bar=bar) 273 | else: 274 | print s % (1, 91, ' !! Can\'t get torrent from web.'), path 275 | 276 | elif os.path.exists(path): 277 | if os.path.isdir(path): 278 | for a, b, c in os.walk(path): 279 | for i in c: 280 | ipath = os.path.join(a, i) 281 | if i.lower().endswith('torrent'): 282 | def do(): 283 | print s % (1, 97, ' ++ transfer:'), ipath 284 | string = open(ipath).read() 285 | tpath = os.path.join(dir_, 'change_' + i) 286 | self.transfer(string, tpath, foo=foo, 287 | bar=bar) 288 | # ??? 
paths.update(ipath) 289 | if os.getcwd() == os.path.abspath(dir_): 290 | do() 291 | elif os.getcwd() != os.path.abspath(dir_) and \ 292 | os.path.abspath(a) != os.path.abspath(dir_): 293 | do() 294 | elif os.path.isfile(path): 295 | if path.lower().endswith('torrent'): 296 | print s % (1, 97, ' ++ transfer:'), path 297 | string = open(path).read() 298 | tpath = os.path.join(dir_, 299 | 'change_' + os.path.basename(path)) 300 | self.transfer(string, tpath, foo=foo, bar=bar) 301 | else: 302 | print s % (1, 91, ' !! file doesn\'t existed'), \ 303 | s % (1, 93, '--'), path 304 | 305 | def import_magnet(froms): 306 | ml = [] 307 | m_re = re.compile(r'btih:([a-zA-Z0-9]{40})') 308 | 309 | def get_magnet(cm): 310 | ls = m_re.findall(cm) 311 | ls = ['magnet:?xt=urn:btih:' + i for i in ls] 312 | return ls 313 | 314 | for path in froms: 315 | if path[0] == '~': 316 | path = os.path.expanduser(path) 317 | else: 318 | path = os.path.abspath(path) 319 | 320 | if os.path.isfile(path): 321 | cm = open(path).read() 322 | ls = get_magnet(cm) 323 | ml += ls 324 | elif os.path.isdir(path): 325 | for a, b, c in os.walk(path): 326 | for i in c: 327 | p = os.path.join(a, i) 328 | cm = open(p).read() 329 | ls = get_magnet(cm) 330 | ml += ls 331 | else: 332 | print s % (1, 91, ' !! 
path is wrong:'), path 333 | 334 | t = set(ml) 335 | return list(t) 336 | 337 | def main(argv): 338 | ###################################################### 339 | # for argparse 340 | p = argparse.ArgumentParser( 341 | description='magnet torrent 互转,数字命名bt内容文件名' \ 342 | ' 用法见 https://github.com/PeterDing/iScript') 343 | p.add_argument('xxx', type=str, nargs='*', 344 | help='命令对象.') 345 | p.add_argument('-i', '--import_from', type=str, nargs='*', 346 | help='import magnet from local.') 347 | p.add_argument('-p', '--proxy', action='store', 348 | type=str, help='proxy for torrage.com, \ 349 | eg: -p "sooks5://127.0.0.1:8883"') 350 | p.add_argument('-d', '--directory', action='store', default=None, 351 | type=str, help='torrents保存的路径, eg: -d /path/to/save') 352 | p.add_argument('-n', '--name', action='store', default=None, 353 | type=str, help='顶级文件夹名称, eg: -n thistopdirectory') 354 | p.add_argument('-t', '--type_', action='store', 355 | default='n', type=str, 356 | help='类型参数,eg: ') 357 | global args 358 | args = p.parse_args(argv[2:]) 359 | comd = argv[1] 360 | xxx = args.xxx 361 | 362 | dir_ = os.getcwd() if not args.directory \ 363 | else args.directory 364 | if not os.path.exists(dir_): 365 | os.mkdir(dir_) 366 | if comd == 'm' or comd == 'mt': # magnet to torrent 367 | urls = xxx if not args.import_from \ 368 | else import_magnet(args.import_from) 369 | x = Bt() 370 | x.magnet2torrent(urls, dir_) 371 | 372 | elif comd == 't' or comd == 'tm': # torrent ot magnet 373 | paths = xxx 374 | x = Bt() 375 | x.torrent2magnet(paths) 376 | 377 | elif comd == 'c' or comd == 'ct': # change 378 | ups = xxx if not args.import_from \ 379 | else import_magnet(args.import_from) 380 | x = Bt() 381 | x.change(ups, dir_, foo=None, bar=None) 382 | 383 | elif comd == 'cr' or comd == 'ctre': # change 384 | foo = xxx[0] 385 | bar = xxx[1] 386 | ups = xxx[2:] if not args.import_from \ 387 | else import_magnet(args.import_from) 388 | x = Bt() 389 | x.change(ups, dir_, foo=foo, bar=bar) 390 
| 391 | else: 392 | print s % (2, 91, ' !! 命令错误\n') 393 | 394 | if __name__ == '__main__': 395 | argv = sys.argv 396 | main(argv) 397 | -------------------------------------------------------------------------------- /ed2k_search.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # vim: set fileencoding=utf8 3 | 4 | import sys 5 | import urllib 6 | import re 7 | import argparse 8 | 9 | s = '\x1b[%d;%dm%s\x1b[0m' # terminual color template 10 | 11 | opener = urllib.urlopen 12 | 13 | class ed2k_search(object): 14 | def __init__(self, keyword=''): 15 | self.url = "http://donkey4u.com/search/%s?page=%s&mode=list" \ 16 | % (keyword, '%s') 17 | print '' 18 | 19 | def get_infos(self, url): 20 | r = opener(url) 21 | assert r 22 | self.html = r.read() 23 | html = re.search(r'.+?
    ', 24 | self.html, re.DOTALL).group() 25 | 26 | sizes = re.findall(r'(.+)', html) 27 | seeds = re.findall(r'(.+)', html) 28 | links = re.findall(r'ed2k://.+?/', html) 29 | 30 | infos = zip(sizes, seeds, links) 31 | 32 | if infos: 33 | self.display(infos) 34 | else: 35 | print s % (1, 91, ' !! You are not Lucky, geting nothing.') 36 | sys.exit(1) 37 | 38 | def display(self, infos): 39 | template = ' size: ' + s % (1, 97, '%s') \ 40 | + ' seed: ' + s % (1, 91, '%s') \ 41 | + '\n ----------------------------' \ 42 | + '\n ' + s % (2, 92, '%s') \ 43 | + '\n ----------------------------\n' 44 | 45 | for i in infos: 46 | t = template % i 47 | print t 48 | 49 | def do(self): 50 | page = 1 51 | while True: 52 | url = self.url % page 53 | self.get_infos(url) 54 | nx = raw_input(s % (1, 93, ' next page?') + ' (N/y): ') 55 | if nx in ('Y', 'y'): 56 | page += 1 57 | print '' 58 | else: 59 | sys.exit(1) 60 | 61 | 62 | def main(xxx): 63 | keyword = ' '.join(xxx) 64 | x = ed2k_search(keyword) 65 | x.do() 66 | 67 | if __name__ == '__main__': 68 | p = argparse.ArgumentParser( 69 | description='searching ed2k at donkey4u.com') 70 | p.add_argument('xxx', type=str, nargs='*', help='keyword') 71 | args = p.parse_args() 72 | main(args.xxx) 73 | -------------------------------------------------------------------------------- /flv_cmd.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # vim: set fileencoding=utf8 3 | 4 | import re 5 | import requests 6 | import os 7 | import sys 8 | import argparse 9 | import random 10 | from HTMLParser import HTMLParser 11 | import urllib 12 | import select 13 | 14 | s = '\x1b[%d;%dm%s\x1b[0m' # terminual color template 15 | parser = HTMLParser() 16 | 17 | ############################################################ 18 | # wget exit status 19 | wget_es = { 20 | 0: "No problems occurred.", 21 | 2: "User interference.", 22 | 1<<8: "Generic error code.", 23 | 2<<8: "Parse error - for 
instance, \ 24 | when parsing command-line optio.wgetrc or .netrc...", 25 | 3<<8: "File I/O error.", 26 | 4<<8: "Network failure.", 27 | 5<<8: "SSL verification failure.", 28 | 6<<8: "Username/password authentication failure.", 29 | 7<<8: "Protocol errors.", 30 | 8<<8: "Server issued an error response." 31 | } 32 | ############################################################ 33 | 34 | headers = { 35 | "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) " \ 36 | "AppleWebKit/537.36 (KHTML, like Gecko) " \ 37 | "Chrome/40.0.2214.91 Safari/537.36", 38 | "Accept": "text/html,application/xhtml+xml,application/xml;" \ 39 | "q=0.9,image/webp,*/*;q=0.8", 40 | "Accept-Encoding": "gzip, deflate, sdch", 41 | "Accept-Language": "en-US,en;q=0.8", 42 | "Referer": "http://flvgo.com/download" 43 | } 44 | 45 | ss = requests.session() 46 | ss.headers.update(headers) 47 | 48 | def download(info): 49 | if not os.path.exists(info['dir_']): 50 | os.mkdir(info['dir_']) 51 | 52 | #else: 53 | #if os.path.exists(info['filename']): 54 | #return 0 55 | 56 | num = random.randint(0, 7) % 8 57 | col = s % (2, num + 90, os.path.basename(info['filename'])) 58 | print '\n ++ 正在下载:', '#', \ 59 | s % (1, 97, info['n']), '/', \ 60 | s % (1, 97, info['amount']), \ 61 | '#', col 62 | 63 | print info['durl'] 64 | cmd = 'wget -c -nv --user-agent "%s" -O "%s" "%s"' \ 65 | % (headers['User-Agent'], info['filename'], info['durl']) 66 | status = os.system(cmd) 67 | 68 | if status != 0: # other http-errors, such as 302. 
def flvxz_parser(cn):
    """Parse flvgo's download page into {'title': ..., 'data': {...}}.

    For every quality block, 'size' is the sum (MB) of all segment sizes.
    """
    infos = {}
    infos['title'] = re.search(r'class="name">(.+?)<', cn).group(1)
    infos['data'] = {}
    for block in cn.split('playerContainer')[1:]:
        quality = re.search(r'视频格式:(\w+)', block).group(1)
        sizes = [float(mb) for mb in re.findall(r'>([\d.]+) MB<', block)]
        # NOTE(review): the empty pattern below appears to have lost its
        # content (likely an <a href=...> tag) in this copy of the file --
        # confirm against upstream before relying on 'durls'.
        durls = re.findall(r'', block)
        infos['data'][quality] = {
            'size': sum(sizes),
            'durls': durls
        }
    return infos
def getext(durl):
    """Guess the media file extension from a download URL.

    FIX: the original tested `durl.find('flv')` for truthiness, but
    str.find returns -1 (truthy) when the substring is absent and 0
    (falsy) when it is at offset 0 -- so almost every URL came back as
    '.flv'.  Plain substring membership is what was intended.
    """
    if 'flv' in durl:
        return '.flv'
    elif 'mp4' in durl:
        return '.mp4'
    elif 'm3u8' in durl:
        return '.m3u8'
    else:
        # default container when nothing recognizable is present
        return '.flv'
lxml_html 10 | 11 | try: 12 | import html 13 | except ImportError: 14 | import HTMLParser 15 | html = HTMLParser.HTMLParser() 16 | 17 | try: 18 | import cPickle as pk 19 | except ImportError: 20 | import pickle as pk 21 | 22 | class LeetcodeProblems(object): 23 | def get_problems_info(self): 24 | leetcode_url = 'https://leetcode.com/problemset/algorithms' 25 | res = requests.get(leetcode_url) 26 | if not res.ok: 27 | print('request error') 28 | sys.exit() 29 | cm = res.text 30 | cmt = cm.split('tbody>')[-2] 31 | indexs = re.findall(r'(\d+)', cmt) 32 | problem_urls = ['https://leetcode.com' + url \ 33 | for url in re.findall( 34 | r'(.+?)", cmt) 36 | tinfos = zip(indexs, levels, problem_urls) 37 | assert (len(indexs) == len(problem_urls) == len(levels)) 38 | infos = [] 39 | for info in tinfos: 40 | res = requests.get(info[-1]) 41 | if not res.ok: 42 | print('request error') 43 | sys.exit() 44 | tree = lxml_html.fromstring(res.text) 45 | title = tree.xpath('//meta[@property="og:title"]/@content')[0] 46 | description = tree.xpath('//meta[@property="description"]/@content') 47 | if not description: 48 | description = tree.xpath('//meta[@property="og:description"]/@content')[0] 49 | else: 50 | description = description[0] 51 | description = html.unescape(description.strip()) 52 | tags = tree.xpath('//div[@id="tags"]/following::a[@class="btn btn-xs btn-primary"]/text()') 53 | infos.append( 54 | { 55 | 'title': title, 56 | 'level': info[1], 57 | 'index': int(info[0]), 58 | 'description': description, 59 | 'tags': tags 60 | } 61 | ) 62 | 63 | with open('leecode_problems.pk', 'wb') as g: 64 | pk.dump(infos, g) 65 | return infos 66 | 67 | def to_text(self, pm_infos): 68 | if self.args.index: 69 | key = 'index' 70 | elif self.args.title: 71 | key = 'title' 72 | elif self.args.tag: 73 | key = 'tags' 74 | elif self.args.level: 75 | key = 'level' 76 | else: 77 | key = 'index' 78 | 79 | infos = sorted(pm_infos, key=lambda i: i[key]) 80 | 81 | text_template = '## {index} - 
def handle_args(argv):
    """Parse command-line options for the problem exporter."""
    ap = argparse.ArgumentParser(
        description='extract all leecode problems to location')
    ap.add_argument('--index', action='store_true', help='sort by index')
    ap.add_argument('--level', action='store_true', help='sort by level')
    ap.add_argument('--tag', action='store_true', help='sort by tag')
    ap.add_argument('--title', action='store_true', help='sort by title')
    ap.add_argument('--rm_blank', action='store_true', help='remove blank')
    ap.add_argument('--line', action='store', type=int, default=10,
                    help='blank of two problems')
    ap.add_argument('-r', '--redownload', action='store_true',
                    help='redownload data')
    return ap.parse_args(argv[1:])

def main(argv):
    """Entry point: parse flags, attach them, run the exporter."""
    x = LeetcodeProblems()
    x.args = handle_args(argv)
    x.run()

if __name__ == '__main__':
    main(sys.argv)
requests 13 | import select 14 | import md5 15 | from mutagen.id3 import ID3,TRCK,TIT2,TALB,TPE1,APIC,TDRC,COMM,TPOS,USLT 16 | from HTMLParser import HTMLParser 17 | 18 | parser = HTMLParser() 19 | s = u'\x1b[%d;%dm%s\x1b[0m' # terminual color template 20 | 21 | ############################################################ 22 | # music.163.com api 23 | # {{{ 24 | url_song = "http://music.163.com/api/song/detail?id=%s&ids=%s" 25 | url_album = "http://music.163.com/api/album/%s" 26 | url_playlist = "http://music.163.com/api/playlist/detail?id=%s&ids=%s" 27 | url_dj = "http://music.163.com/api/dj/program/detail?id=%s&ids=%s" 28 | url_artist_albums = "http://music.163.com\ 29 | /api/artist/albums/%s?offset=0&limit=1000" 30 | url_artist_top_50_songs = "http://music.163.com/artist?id=%s" 31 | # }}} 32 | ############################################################ 33 | 34 | ############################################################ 35 | # wget exit status 36 | wget_es = { 37 | 0:"No problems occurred.", 38 | 2:"User interference.", 39 | 1<<8:"Generic error code.", 40 | 2<<8:"Parse error - for instance, when parsing command-line ' \ 41 | 'optio.wgetrc or .netrc...", 42 | 3<<8:"File I/O error.", 43 | 4<<8:"Network failure.", 44 | 5<<8:"SSL verification failure.", 45 | 6<<8:"Username/password authentication failure.", 46 | 7<<8:"Protocol errors.", 47 | 8<<8:"Server issued an error response." 
def encrypted_id(id):
    """Encrypt a dfsId into the token used in netease's CDN mp3 URLs.

    XOR the id bytes with a fixed key, md5 the result, base64 it, and
    make it URL-safe ('/' -> '_', '+' -> '-').

    FIX: the original depended on Python-2-only features (`md5` module,
    `str.encode('base64')`, `xrange`); this version produces identical
    output using hashlib/base64/range and runs under 2 and 3.
    """
    import hashlib
    import base64
    key = bytearray(b'3go8&$8*3*3h0k(2)2')
    data = bytearray(id if isinstance(id, bytes) else id.encode('utf-8'))
    for i in range(len(data)):
        data[i] ^= key[i % len(key)]
    digest = hashlib.md5(bytes(data)).digest()
    # 16-byte digest -> single-line 24-char base64 (the original stripped
    # the trailing newline that py2's .encode('base64') appended)
    result = base64.b64encode(digest).decode('ascii')
    return result.replace('/', '_').replace('+', '-')

def modificate_text(text):
    """Normalize a title: unescape HTML entities, replace path separators
    with '-', and squeeze runs of whitespace to one space."""
    text = parser.unescape(text)
    text = re.sub(r'//*', '-', text)
    text = text.replace('/', '-')
    text = text.replace('\\', '-')
    text = re.sub(r'\s\s+', ' ', text)
    return text

# for FAT file system
def modificate_file_name_for_wget(file_name):
    """Strip characters FAT filesystems (and wget) cannot handle."""
    file_name = re.sub(r'\s*:\s*', u' - ', file_name)
    file_name = file_name.replace('?', '')
    file_name = file_name.replace('"', '\'')
    return file_name

def z_index(size):
    """Number of digits in `size` -- zero-pad width for track numbers."""
    return len(str(size))
self.amount_songs = u'1' 114 | 115 | self.download = self.play if args.play else self.download 116 | 117 | def get_durl(self, i): 118 | for q in ('hMusic', 'mMusic', 'lMusic'): 119 | if i[q]: 120 | dfsId = str(i[q]['dfsId']) 121 | edfsId = encrypted_id(dfsId) 122 | durl = u'http://p1.music.126.net/%s/%s.mp3' \ 123 | % (edfsId, dfsId) 124 | return durl, q[0] 125 | return None, None 126 | 127 | def get_cover(self, info): 128 | if info['album_name'] == self.cover_id: 129 | return self.cover_data 130 | else: 131 | self.cover_id = info['album_name'] 132 | while True: 133 | url = info['album_pic_url'] 134 | try: 135 | self.cover_data = requests.get(url).content 136 | if self.cover_data[:5] != '> 输入 a 下载该艺术家所有专辑.\n' \ 184 | ' >> 输入 t 下载该艺术家 Top 50 歌曲.\n >> ') 185 | if code == 'a': 186 | print(s % (2, 92, u'\n -- 正在分析艺术家专辑信息 ...')) 187 | self.download_artist_albums() 188 | elif code == 't': 189 | print(s % (2, 92, u'\n -- 正在分析艺术家 Top 50 信息 ...')) 190 | self.download_artist_top_50_songs() 191 | else: 192 | print(s % (1, 92, u' --> Over')) 193 | elif 'song' in self.url: 194 | self.song_id = re.search( 195 | r'song.+?(\d+)', self.url).group(1) 196 | print(s % (2, 92, u'\n -- 正在分析歌曲信息 ...')) 197 | self.download_song() 198 | elif 'djradio' in self.url: 199 | self.djradio_id = re.search( 200 | r'id=(\d+)', self.url).group(1) 201 | print(s % (2, 92, u'\n -- 正在分析DJ节目信息 ...')) 202 | self.download_djradio() 203 | elif 'program' in self.url: 204 | self.dj_id = re.search( 205 | r'id=(\d+)', self.url).group(1) 206 | print(s % (2, 92, u'\n -- 正在分析DJ节目信息 ...')) 207 | self.download_dj() 208 | else: 209 | print(s % (2, 91, u' 请正确输入music.163.com网址.')) 210 | 211 | def get_song_info(self, i): 212 | z = z_index(i['album']['size']) \ 213 | if i['album'].get('size') else 1 214 | song_info = {} 215 | song_info['song_id'] = i['id'] 216 | song_info['song_url'] = u'http://music.163.com/song/%s' \ 217 | % i['id'] 218 | song_info['track'] = str(i['position']) 219 | song_info['durl'], 
song_info['mp3_quality'] = self.get_durl(i) 220 | #song_info['album_description'] = album_description 221 | #song_info['lyric_url'] = i['lyric'] 222 | #song_info['sub_title'] = i['sub_title'] 223 | #song_info['composer'] = i['composer'] 224 | #song_info['disc_code'] = i['disc_code'] 225 | #if not song_info['sub_title']: song_info['sub_title'] = u'' 226 | #if not song_info['composer']: song_info['composer'] = u'' 227 | #if not song_info['disc_code']: song_info['disc_code'] = u'' 228 | t = time.gmtime(int(i['album']['publishTime'])*0.001) 229 | #song_info['year'] = unicode('-'.join([str(t.tm_year), \ 230 | #str(t.tm_mon), str(t.tm_mday)])) 231 | song_info['year'] = unicode('-'.join( 232 | [str(t.tm_year), str(t.tm_mon), str(t.tm_mday)] 233 | )) 234 | song_info['song_name'] = modificate_text(i['name']).strip() 235 | song_info['artist_name'] = modificate_text(i['artists'][0]['name']) 236 | song_info['album_pic_url'] = i['album']['picUrl'] 237 | song_info['cd_serial'] = u'1' 238 | song_info['album_name'] = modificate_text(i['album']['name']) 239 | file_name = song_info[ 'track'].zfill(z) \ 240 | + '.' 
+ song_info['song_name'] \ 241 | + ' - ' + song_info['artist_name'] \ 242 | + '.mp3' 243 | song_info['file_name'] = file_name 244 | # song_info['low_mp3'] = i['mp3Url'] 245 | return song_info 246 | 247 | def get_song_infos(self, songs): 248 | for i in songs: 249 | song_info = self.get_song_info(i) 250 | self.song_infos.append(song_info) 251 | 252 | def download_song(self, noprint=False, n=1): 253 | j = ss.get( 254 | url_song % ( 255 | self.song_id, urllib.quote('[%s]' % self.song_id) 256 | ) 257 | ).json() 258 | songs = j['songs'] 259 | if not noprint: 260 | print(s % (2, 97, u'\n >> ' + u'1 首歌曲将要下载.')) \ 261 | if not args.play else '' 262 | self.get_song_infos(songs) 263 | self.download(self.amount_songs, n) 264 | 265 | def download_album(self): 266 | j = ss.get(url_album % (self.album_id)).json() 267 | songs = j['album']['songs'] 268 | d = modificate_text( 269 | j['album']['name'] \ 270 | + ' - ' + j['album']['artist']['name']) 271 | dir_ = os.path.join(os.getcwd().decode('utf8'), d) 272 | self.dir_ = modificate_file_name_for_wget(dir_) 273 | self.amount_songs = unicode(len(songs)) 274 | print(s % (2, 97, \ 275 | u'\n >> ' + self.amount_songs + u' 首歌曲将要下载.')) \ 276 | if not args.play else '' 277 | self.get_song_infos(songs) 278 | self.download(self.amount_songs) 279 | 280 | def download_playlist(self): 281 | j = ss.get( 282 | url_playlist % ( 283 | self.playlist_id, urllib.quote('[%s]' % self.playlist_id) 284 | ) 285 | ).json() 286 | songs = j['result']['tracks'] 287 | d = modificate_text( 288 | j['result']['name'] + ' - ' \ 289 | + j['result']['creator']['nickname']) 290 | dir_ = os.path.join(os.getcwd().decode('utf8'), d) 291 | self.dir_ = modificate_file_name_for_wget(dir_) 292 | self.amount_songs = unicode(len(songs)) 293 | print(s % (2, 97, u'\n >> ' \ 294 | + self.amount_songs + u' 首歌曲将要下载.')) \ 295 | if not args.play else '' 296 | self.get_song_infos(songs) 297 | self.download(self.amount_songs) 298 | 299 | def download_djradio(self): 300 | html = ss.get( 
301 | 'http://music.163.com/djradio?id=%s' \ 302 | % self.djradio_id).content 303 | dj_ids = re.findall(r'/program\?id=(\d+)', html) 304 | 305 | for dj_id in dj_ids: 306 | self.dj_id = dj_id 307 | self.download_dj() 308 | self.song_infos = [] 309 | 310 | def download_dj(self): 311 | j = ss.get( 312 | url_dj % ( 313 | self.dj_id, urllib.quote('[%s]' % self.dj_id) 314 | ) 315 | ).json() 316 | songs = j['program']['songs'] 317 | d = modificate_text( 318 | j['program']['name'] + ' - ' \ 319 | + j['program']['dj']['nickname']) 320 | dir_ = os.path.join(os.getcwd().decode('utf8'), d) 321 | self.dir_ = modificate_file_name_for_wget(dir_) 322 | self.amount_songs = unicode(len(songs)) 323 | print(s % (2, 97, u'\n >> \ 324 | ' + self.amount_songs + u' 首歌曲将要下载.')) \ 325 | if not args.play else None 326 | self.get_song_infos(songs) 327 | self.download(self.amount_songs) 328 | 329 | 330 | def download_artist_albums(self): 331 | ss.cookies.update({'appver': '1.5.2'}) 332 | j = ss.get( 333 | url_artist_albums % self.artist_id).json() 334 | for albuminfo in j['hotAlbums']: 335 | self.album_id = albuminfo['id'] 336 | self.download_album() 337 | 338 | def download_artist_top_50_songs(self): 339 | html = ss.get( 340 | url_artist_top_50_songs % self.artist_id).content 341 | text = re.search( 342 | r'', html).group(1) 343 | j = json.loads(text) 344 | songids = [i['id'] for i in j] 345 | d = modificate_text( 346 | j[0]['artists'][0]['name'] + ' - ' + 'Top 50') 347 | dir_ = os.path.join(os.getcwd().decode('utf8'), d) 348 | self.dir_ = modificate_file_name_for_wget(dir_) 349 | self.amount_songs = unicode(len(songids)) 350 | print(s % (2, 97, u'\n >> \ 351 | ' + self.amount_songs + u' 首歌曲将要下载.')) \ 352 | if not args.play else '' 353 | n = 1 354 | for sid in songids: 355 | self.song_id = sid 356 | self.song_infos = [] 357 | self.download_song(noprint=True, n=n) 358 | n += 1 359 | 360 | def display_infos(self, i): 361 | q = {'h': 'High', 'm': 'Middle', 'l': 'Low'} 362 | print '\n 
----------------' 363 | print ' >>', s % (2, 94, i['file_name']) 364 | print ' >>', s % (2, 95, i['album_name']) 365 | print ' >>', s % (2, 92, 'http://music.163.com/song/%s' \ 366 | % i['song_id']) 367 | print ' >>', s % (2, 97, 'MP3-Quality'), ':', \ 368 | s % (1, 92, str(q.get(i['mp3_quality']))) 369 | print '' 370 | 371 | def play(self, amount_songs, n=None): 372 | for i in self.song_infos: 373 | self.display_infos(i) 374 | if not i['durl']: 375 | continue 376 | cmd = 'mpv --really-quiet --audio-display no %s' % i['durl'] 377 | os.system(cmd) 378 | timeout = 1 379 | ii, _, _ = select.select([sys.stdin], [], [], timeout) 380 | if ii: 381 | sys.exit(0) 382 | else: 383 | pass 384 | 385 | def download(self, amount_songs, n=None): 386 | dir_ = modificate_file_name_for_wget(self.dir_) 387 | cwd = os.getcwd().decode('utf8') 388 | if dir_ != cwd: 389 | if not os.path.exists(dir_): 390 | os.mkdir(dir_) 391 | ii = 1 392 | for i in self.song_infos: 393 | num = random.randint(0, 100) % 7 394 | col = s % (2, num + 90, i['file_name']) 395 | t = modificate_file_name_for_wget(i['file_name']) 396 | file_name = os.path.join(dir_, t) 397 | if os.path.exists(file_name): # if file exists, no get_durl 398 | if args.undownload: 399 | self.modified_id3(file_name, i) 400 | ii += 1 401 | continue 402 | else: 403 | ii += 1 404 | continue 405 | if not args.undownload: 406 | q = {'h': 'High', 'm': 'Middle', 'l': 'Low'} 407 | mp3_quality = str(q.get(i['mp3_quality'])) 408 | if n == None: 409 | print(u'\n ++ 正在下载: #%s/%s# %s\n' \ 410 | u' ++ mp3_quality: %s' \ 411 | % (ii, amount_songs, col, 412 | s % (1, 91, mp3_quality))) 413 | else: 414 | print(u'\n ++ 正在下载: #%s/%s# %s\n' \ 415 | u' ++ mp3_quality: %s' \ 416 | % (n, amount_songs, col, 417 | s % (1, 91, mp3_quality))) 418 | if not i['durl']: 419 | continue 420 | 421 | file_name_for_wget = file_name.replace('`', '\`') 422 | cmd = 'wget -c -nv -U "%s" -O "%s.tmp" %s' \ 423 | % (headers['User-Agent'], file_name_for_wget, i['durl']) 424 | cmd 
= cmd.encode('utf8') 425 | status = os.system(cmd) 426 | if status != 0: # other http-errors, such as 302. 427 | wget_exit_status_info = wget_es[status] 428 | print('\n\n ----### \x1b[1;91mERROR\x1b[0m ==> \x1b[1;91m%d ' \ 429 | '(%s)\x1b[0m ###--- \n\n' \ 430 | % (status, wget_exit_status_info)) 431 | print s % (1, 91, ' ===> '), cmd 432 | sys.exit(1) 433 | else: 434 | os.rename('%s.tmp' % file_name, file_name) 435 | 436 | self.modified_id3(file_name, i) 437 | ii += 1 438 | time.sleep(0) 439 | 440 | def main(url): 441 | x = neteaseMusic(url) 442 | x.url_parser() 443 | 444 | if __name__ == '__main__': 445 | p = argparse.ArgumentParser( 446 | description='downloading any music.163.com') 447 | p.add_argument('url', help='any url of music.163.com') 448 | p.add_argument('-p', '--play', action='store_true', \ 449 | help='play with mpv') 450 | p.add_argument('-c', '--undownload', action='store_true', \ 451 | help='no download, using to renew id3 tags') 452 | args = p.parse_args() 453 | main(args.url) 454 | -------------------------------------------------------------------------------- /music.baidu.com.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # vim: set fileencoding=utf8 3 | 4 | import re 5 | import sys 6 | import os 7 | import random 8 | import time 9 | import json 10 | import urllib2 11 | import argparse 12 | import select 13 | 14 | from mutagen.id3 import ID3,TRCK,TIT2,TALB,TPE1,APIC,TDRC,COMM,TCOM,TCON,TSST,WXXX,TSRC 15 | from HTMLParser import HTMLParser 16 | parser = HTMLParser() 17 | s = u'\x1b[%d;%dm%s\x1b[0m' # terminual color template 18 | 19 | headers = { 20 | "Accept":"text/html,application/xhtml+xml,application/xml; \ 21 | q=0.9,image/webp,*/*;q=0.8", 22 | "Accept-Encoding":"text/html", 23 | "Accept-Language":"en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2", 24 | "Content-Type":"application/x-www-form-urlencoded", 25 | "Referer":"http://www.baidu.com/", 26 | "User-Agent":"Mozilla/5.0 
(X11; Linux i686) AppleWebKit/537.36 \ 27 | (KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36" 28 | } 29 | 30 | ############################################################ 31 | # wget exit status 32 | wget_es = { 33 | 0:"No problems occurred.", 34 | 2:"User interference.", 35 | 1<<8:"Generic error code.", 36 | 2<<8:"Parse error - for instance, when parsing command-line \ 37 | optio.wgetrc or .netrc...", 38 | 3<<8:"File I/O error.", 39 | 4<<8:"Network failure.", 40 | 5<<8:"SSL verification failure.", 41 | 6<<8:"Username/password authentication failure.", 42 | 7<<8:"Protocol errors.", 43 | 8<<8:"Server issued an error response." 44 | } 45 | ############################################################ 46 | 47 | def modificate_text(text): 48 | text = parser.unescape(text) 49 | text = re.sub(r'//*', '-', text) 50 | text = text.replace('/', '-') 51 | text = text.replace('\\', '-') 52 | text = re.sub(r'\s\s+', ' ', text) 53 | return text 54 | 55 | def modificate_file_name_for_wget(file_name): 56 | file_name = re.sub(r'\s*:\s*', u' - ', file_name) # for FAT file system 57 | file_name = file_name.replace('?', '') # for FAT file system 58 | file_name = file_name.replace('"', '\'') # for FAT file system 59 | return file_name 60 | 61 | def z_index(song_infos): 62 | size = len(song_infos) 63 | z = len(str(size)) 64 | return z 65 | 66 | class baidu_music(object): 67 | def __init__(self, url): 68 | self.url = url 69 | self.song_infos = [] 70 | self.json_url = '' 71 | self.dir_ = os.getcwd().decode('utf8') 72 | self.template_wgets = 'wget -nv -U "%s" -O "%s.tmp" %s' % (headers['User-Agent'], '%s', '%s') 73 | self.template_album = 'http://music.baidu.com/album/%s' 74 | if args.flac: 75 | self.template_api = 'http://music.baidu.com/data/music/fmlink?songIds=%s&type=flac' 76 | elif args.low: 77 | self.template_api = 'http://music.baidu.com/data/music/fmlink?songIds=%s&type=mp3' 78 | elif args.high: 79 | self.template_api = 
'http://music.baidu.com/data/music/fmlink?songIds=%s&type=mp3&rate=320' 80 | else: 81 | self.template_api = 'http://music.baidu.com/data/music/fmlink?songIds=%s&type=mp3&rate=320' 82 | 83 | self.album_id = '' 84 | self.song_id = '' 85 | 86 | self.download = self.play if args.play else self.download 87 | 88 | def get_songidlist(self, song_id): 89 | html = self.opener.open(self.template_album % song_id).read() 90 | songidlist = re.findall(r'/song/(\d+)', html) 91 | return songidlist 92 | 93 | def get_cover(self, url): 94 | i = 1 95 | while True: 96 | cover_data = self.opener.open(url).read() 97 | if cover_data[:5] != '= 10: 100 | print s % (1, 91, " |--> Error: can't get cover image") 101 | sys.exit(0) 102 | i += 1 103 | 104 | def modified_id3(self, file_name, info): 105 | id3 = ID3() 106 | id3.add(TRCK(encoding=3, text=info['track'])) 107 | id3.add(TIT2(encoding=3, text=info['song_name'])) 108 | id3.add(TALB(encoding=3, text=info['album_name'])) 109 | id3.add(TPE1(encoding=3, text=info['artist_name'])) 110 | id3.add(COMM(encoding=3, desc=u'Comment', text=info['song_url'])) 111 | id3.add(APIC(encoding=3, mime=u'image/jpg', type=3, desc=u'Cover', data=self.get_cover(info['album_pic_url']))) 112 | id3.save(file_name) 113 | 114 | def url_parser(self): 115 | if '/album/' in self.url: 116 | self.album_id = re.search(r'/album/(\d+)', self.url).group(1) 117 | #print(s % (2, 92, u'\n -- 正在分析专辑信息 ...')) 118 | self.get_album_infos() 119 | elif '/song/' in self.url: 120 | self.song_id = re.search(r'/song/(\d+)', self.url).group(1) 121 | #print(s % (2, 92, u'\n -- 正在分析歌曲信息 ...')) 122 | self.get_song_infos(self.song_id) 123 | else: 124 | print(s % (2, 91, u' 请正确输入baidu网址.')) 125 | self.download() 126 | 127 | def get_song_infos(self, song_id, track_number=''): 128 | api_json = self.opener.open(self.template_api % song_id).read() 129 | j = json.loads(api_json) 130 | song_info = {} 131 | song_info['song_id'] = unicode(j['data']['songList'][0]['songId']) 132 | song_info['track'] = 
unicode(track_number) 133 | song_info['song_url'] = u'http://music.baidu.com/song/' + song_info['song_id'] 134 | song_info['song_name'] = modificate_text(j['data']['songList'][0]['songName']).strip() 135 | song_info['album_name'] = modificate_text(j['data']['songList'][0]['albumName']).strip() 136 | song_info['artist_name'] = modificate_text(j['data']['songList'][0]['artistName']).strip() 137 | song_info['album_pic_url'] = j['data']['songList'][0]['songPicRadio'] 138 | song_info['file_name'] = song_info['artist_name'] + ' - ' + song_info['song_name'] 139 | if song_info['track']: 140 | song_info['file_name'] = song_info['track'].zfill(2) + '.' + song_info['file_name'] 141 | if args.flac: 142 | song_info['file_name'] = song_info['file_name'] + '.flac' 143 | else: 144 | song_info['file_name'] = song_info['file_name'] + '.mp3' 145 | song_info['durl'] = j['data']['songList'][0]['songLink'] 146 | self.song_infos.append(song_info) 147 | 148 | def get_album_infos(self): 149 | songidlist = self.get_songidlist(self.album_id) 150 | track_number = 1 151 | for i in songidlist: 152 | self.get_song_infos(i, track_number) 153 | track_number += 1 154 | d = modificate_text(self.song_infos[0]['artist_name'] + ' - ' + self.song_infos[0]['album_name']) 155 | self.dir_ = os.path.join(os.getcwd().decode('utf8'), d) 156 | 157 | def display_infos(self, i): 158 | print '\n ----------------' 159 | print ' >>', s % (2, 94, i['file_name']) 160 | print ' >>', s % (2, 95, i['album_name']) 161 | print ' >>', s % (2, 92, 'http://music.baidu.com/song/%s' % i['song_id']) 162 | print '' 163 | 164 | def play(self): 165 | for i in self.song_infos: 166 | durl = i['durl'] 167 | self.display_infos(i) 168 | os.system('mpv --really-quiet %s' % durl) 169 | timeout = 1 170 | ii, _, _ = select.select([sys.stdin], [], [], timeout) 171 | if ii: 172 | sys.exit(0) 173 | else: 174 | pass 175 | 176 | def download(self): 177 | dir_ = modificate_file_name_for_wget(self.dir_) 178 | cwd = os.getcwd().decode('utf8') 179 
| csongs = len(self.song_infos) 180 | if dir_ != cwd: 181 | if not os.path.exists(dir_): 182 | os.mkdir(dir_) 183 | print(s % (2, 97, u'\n >> ' + str(csongs) + u' 首歌曲将要下载.')) 184 | ii = 1 185 | for i in self.song_infos: 186 | t = modificate_file_name_for_wget(i['file_name']) 187 | file_name = os.path.join(dir_, t) 188 | if os.path.exists(file_name): ## if file exists, no get_durl 189 | ii += 1 190 | print(u'\n 文件已存在~') 191 | continue 192 | file_name_for_wget = file_name.replace('`', '\`') 193 | if 'zhangmenshiting.baidu.com' in i['durl'] or \ 194 | 'yinyueshiting.baidu.com' in i['durl']: 195 | num = random.randint(0,100) % 7 196 | col = s % (2, num + 90, i['file_name']) 197 | print(u'\n ++ 正在下载: %s' % col) 198 | wget = self.template_wgets % (file_name_for_wget, i['durl']) 199 | wget = wget.encode('utf8') 200 | status = os.system(wget) 201 | if status != 0: # other http-errors, such as 302. 202 | wget_exit_status_info = wget_es[status] 203 | print('\n\n ----### \x1b[1;91mERROR\x1b[0m ==> \x1b[1;91m%d (%s)\x1b[0m ###--- \n\n' % (status, wget_exit_status_info)) 204 | print(' ===> ' + wget) 205 | break 206 | else: 207 | os.rename('%s.tmp' % file_name, file_name) 208 | 209 | self.modified_id3(file_name, i) 210 | ii += 1 211 | #time.sleep(10) 212 | else: 213 | print s % (1, 91, ' !! 
Oops, you are unlucky, the song is not from zhangmenshiting.baidu.com') 214 | print i['durl'] 215 | 216 | def main(url): 217 | x = baidu_music(url) 218 | opener = urllib2.build_opener() 219 | opener.addheaders = headers.items() 220 | x.opener = opener 221 | x.url_parser() 222 | 223 | if __name__ == '__main__': 224 | p = argparse.ArgumentParser(description='downloading any music.baidu.com') 225 | p.add_argument('url', help='any url of music.baidu.com') 226 | p.add_argument('-f', '--flac', action='store_true', help='download flac') 227 | p.add_argument('-i', '--high', action='store_true', help='download 320') 228 | p.add_argument('-l', '--low', action='store_true', help='download 128') 229 | p.add_argument('-p', '--play', action='store_true', \ 230 | help='play with mpv') 231 | args = p.parse_args() 232 | main(args.url) 233 | -------------------------------------------------------------------------------- /tumblr.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # vim: set fileencoding=utf8 3 | 4 | from __future__ import unicode_literals 5 | 6 | import os 7 | import sys 8 | import re 9 | import json 10 | import collections 11 | import multiprocessing 12 | import requests 13 | requests.packages.urllib3.disable_warnings() 14 | import argparse 15 | import random 16 | import time 17 | import select 18 | import signal 19 | 20 | API_KEY = 'fuiKNFp9vQFvjLNvx4sUwti4Yb5yGutBN4Xh10LXZhhRKjWlV4' 21 | 22 | PID_PATH = '/tmp/tumblr.py.pid' 23 | 24 | # statistic parameters 25 | NET_ERRORS = multiprocessing.Value('i', 0) 26 | UNCOMPLETION = multiprocessing.Value('i', 0) 27 | DOWNLOAD_ERRORS = multiprocessing.Value('i', 0) 28 | DOWNLOADS = multiprocessing.Value('i', 0) 29 | COMPLETION = multiprocessing.Value('i', 0) 30 | OFFSET = multiprocessing.Value('i', 0) 31 | 32 | ############################################################ 33 | # wget exit status 34 | wget_es = { 35 | 0: "No problems occurred.", 36 | 2: "User 
interference.", 37 | 1<<8: "Generic error code.", 38 | 2<<8: "Parse error - for instance, when parsing command-line " \ 39 | "optio.wgetrc or .netrc...", 40 | 3<<8: "File I/O error.", 41 | 4<<8: "Network failure.", 42 | 5<<8: "SSL verification failure.", 43 | 6<<8: "Username/password authentication failure.", 44 | 7<<8: "Protocol errors.", 45 | 8<<8: "Server issued an error response." 46 | } 47 | ############################################################ 48 | 49 | s = '\x1b[%d;%dm%s\x1b[0m' # terminual color template 50 | 51 | headers = { 52 | "Accept":"text/html,application/xhtml+xml,application/xml; " \ 53 | "q=0.9,image/webp,*/*;q=0.8", 54 | "Accept-Encoding":"text/html", 55 | "Accept-Language":"en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2", 56 | "Content-Type":"application/x-www-form-urlencoded", 57 | "Referer":"https://api.tumblr.com/console//calls/blog/posts", 58 | "User-Agent":"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 " \ 59 | "(KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36" 60 | } 61 | 62 | ss = requests.session() 63 | ss.headers.update(headers) 64 | 65 | PROXY = None 66 | 67 | class Error(Exception): 68 | def __init__(self, msg): 69 | self.msg = msg 70 | def __str__(self): 71 | return self.msg 72 | 73 | def reset_statistic_params(): 74 | NET_ERRORS.value = 0 75 | UNCOMPLETION.value = 0 76 | DOWNLOAD_ERRORS.value = 0 77 | DOWNLOADS.value = 0 78 | COMPLETION.value = 0 79 | OFFSET.value = 0 80 | 81 | def play(urls, args): 82 | for url in urls: 83 | tumblr = Tumblr(args, url) 84 | while True: 85 | items = tumblr.get_item_generator() 86 | if not items: 87 | break 88 | play_do(items, args.quiet) 89 | 90 | def play_do(items, quiet): 91 | for item in items: 92 | num = random.randint(0, 7) % 8 93 | col = s % (2, num + 90, item['durl']) 94 | print ' ++ play:', col 95 | quiet = ' --really-quiet' if quiet else '' 96 | cmd = 'mpv%s --no-ytdl --cache-default 20480 --cache-secs 120 ' \ 97 | '--http-header-fields "User-Agent:%s" ' \ 98 | '"%s"' \ 99 | 
% (quiet, headers['User-Agent'], item['durl']) 100 | 101 | os.system(cmd) 102 | timeout = 1 103 | ii, _, _ = select.select([sys.stdin], [], [], timeout) 104 | if ii: 105 | sys.exit(0) 106 | else: 107 | pass 108 | 109 | def remove_downloaded_items(items): 110 | N = len(items) 111 | for i in range(N): 112 | item = items.pop() 113 | filepath = os.path.join(item['dir_'], item['subdir'], item['filename']) 114 | if not os.path.exists(filepath): 115 | items.appendleft(item) 116 | 117 | def download_run(item): 118 | filepath = os.path.join(item['dir_'], item['subdir'], item['filename']) 119 | # if os.path.exists(filepath): 120 | # return None 121 | # num = random.randint(0, 7) % 8 122 | # col = s % (1, num + 90, filepath) 123 | # print ' ++ download: %s' % col 124 | 125 | if PROXY: 126 | cmd = ' '.join([ 127 | 'curl', '-s', '-x', '"%s"' % PROXY, '-o', '"%s.tmp"' % filepath, 128 | '-H', '"User-Agent: %s"' % headers['User-Agent'], 129 | '"%s"' % item['durl'] 130 | ]) 131 | else: 132 | cmd = ' '.join([ 133 | 'curl', '-s', '-o', '"%s.tmp"' % filepath, 134 | '-H', '"User-Agent: %s"' % headers['User-Agent'], 135 | '"%s"' % item['durl'] 136 | ]) 137 | status = os.system(cmd) 138 | return status, filepath 139 | 140 | def callback(filepath): 141 | os.rename('%s.tmp' % filepath, filepath) 142 | 143 | class Downloader(multiprocessing.Process): 144 | def __init__(self, queue, lock): 145 | super(Downloader, self).__init__() 146 | self.queue = queue 147 | self.daemon = True 148 | self.lock = lock 149 | 150 | def run(self): 151 | while True: 152 | item = self.queue.get() 153 | self.queue.task_done() 154 | if not item: 155 | break 156 | status = download_run(item) 157 | if not status: # file was downloaded. 
158 | continue 159 | status, filepath = status 160 | if status != 0: 161 | # print s % (1, 93, '[Error %s] at wget' % status), wget_es[status] 162 | self.lock.acquire() 163 | UNCOMPLETION.value += 1 164 | DOWNLOAD_ERRORS.value += 1 165 | self.lock.release() 166 | else: 167 | self.lock.acquire() 168 | DOWNLOADS.value += 1 169 | self.lock.release() 170 | callback(filepath) 171 | 172 | class TumblrAPI(object): 173 | def _request(self, base_hostname, target, type, params): 174 | api_url = '/'.join(['https://api.tumblr.com/v2/blog', 175 | base_hostname, target, type]) 176 | params['api_key'] = API_KEY 177 | if PROXY: 178 | proxies = {'http': PROXY, 'https': PROXY} 179 | else: 180 | proxies = None 181 | while True: 182 | try: 183 | res = ss.get(api_url, params=params, proxies=proxies, timeout=10) 184 | json_data = res.json() 185 | break 186 | except KeyboardInterrupt: 187 | sys.exit() 188 | except Exception as e: 189 | NET_ERRORS.value += 1 # count errors 190 | print s % (1, 93, '[Error at requests]:'), e, '\n' 191 | time.sleep(5) 192 | if json_data['meta']['msg'].lower() != 'ok': 193 | raise Error(s % (1, 91, json_data['meta']['msg'])) 194 | 195 | return json_data['response'] 196 | 197 | def _info(self, base_hostname): 198 | return self._request(base_hostname, 'info', '', None) 199 | 200 | def _photo(self, base_hostname, offset='', tag='', post_id='', to_items=True): 201 | def make_items(raw_data): 202 | items = collections.deque() 203 | for i in raw_data['posts']: 204 | index = 1 205 | if i.get('photos'): 206 | for ii in i['photos']: 207 | durl = ii['original_size']['url'].replace('http:', 'https:') 208 | filename = os.path.join( 209 | '%s_%s.%s' % (i['id'], index, durl.split('.')[-1])) 210 | t = { 211 | 'durl': durl, 212 | 'filename': filename, 213 | 'key': i['timestamp'], 214 | 'subdir': 'photos', 215 | } 216 | index += 1 217 | items.append(t) 218 | return items 219 | 220 | params = { 221 | 'offset': offset, 222 | 'before': offset if tag else '', 223 | 'tag': tag, 
224 | 'id': post_id, 225 | 'limit': 20 if not tag and not post_id else '', 226 | 'filter': 'text' 227 | } 228 | raw_data = self._request(base_hostname, 'posts', 'photo', params) 229 | if to_items: 230 | return make_items(raw_data) 231 | else: 232 | return raw_data 233 | 234 | def _audio(self, base_hostname, offset='', tag='', post_id='', to_items=True): 235 | def make_items(raw_data): 236 | items = collections.deque() 237 | for i in raw_data['posts']: 238 | durl = i['audio_url'].replace('http:', 'https:') 239 | filename = os.path.join( 240 | '%s_%s.%s' % (i['id'], i['track_name'], durl.split('.')[-1])) 241 | t = { 242 | 'durl': durl, 243 | 'filename': filename, 244 | 'timestamp': i['timestamp'] if tag else '', 245 | 'subdir': 'audios' 246 | } 247 | items.append(t) 248 | return items 249 | 250 | params = { 251 | 'offset': offset, 252 | 'before': offset if tag else '', 253 | 'tag': tag, 254 | 'id': post_id, 255 | 'limit': 20 if not tag and not post_id else '', 256 | 'filter': 'text' 257 | } 258 | raw_data = self._request(base_hostname, 'posts', 'audio', params) 259 | if to_items: 260 | return make_items(raw_data) 261 | else: 262 | return raw_data 263 | 264 | def _video(self, base_hostname, offset='', tag='', post_id='', to_items=True): 265 | def make_items(raw_data): 266 | items = collections.deque() 267 | for i in raw_data['posts']: 268 | if not i.get('video_url'): 269 | continue 270 | durl = i['video_url'].replace('http:', 'https:') 271 | filename = os.path.join( 272 | '%s.%s' % (i['id'], durl.split('.')[-1])) 273 | t = { 274 | 'durl': durl, 275 | 'filename': filename, 276 | 'timestamp': i['timestamp'] if tag else '', 277 | 'subdir': 'videos' 278 | } 279 | items.append(t) 280 | return items 281 | 282 | params = { 283 | 'offset': offset, 284 | 'before': offset if tag else '', 285 | 'tag': tag, 286 | 'id': post_id, 287 | 'limit': 20 if not tag and not post_id else '', 288 | 'filter': 'text' 289 | } 290 | raw_data = self._request(base_hostname, 'posts', 'video', 
params) 291 | if to_items: 292 | return make_items(raw_data) 293 | else: 294 | return raw_data 295 | 296 | class Tumblr(TumblrAPI): 297 | def __init__(self, args, url): 298 | self.args = args 299 | self.offset = self.args.offset 300 | self.make_items = self.parse_urls(url) 301 | 302 | def save_json(self): 303 | with open(self.json_path, 'w') as g: 304 | g.write(json.dumps( 305 | {'offset': self.offset}, indent=4, sort_keys=True)) 306 | 307 | def init_infos(self, base_hostname, target_type, tag=''): 308 | self.infos = {'host': base_hostname} 309 | if not tag: 310 | dir_ = os.path.join(os.getcwd(), self.infos['host']) 311 | json_path = os.path.join(dir_, 'json.json') 312 | 313 | if not os.path.exists(dir_): 314 | if not self.args.play: 315 | os.makedirs(dir_) 316 | else: 317 | if os.path.exists(json_path): 318 | self.offset = json.load(open(json_path))['offset'] - 60 \ 319 | if not self.args.update else self.args.offset 320 | if self.offset < 0: self.offset = 0 321 | else: 322 | dir_ = os.path.join(os.getcwd(), 'tumblr-%s' % tag) 323 | json_path = os.path.join(dir_, 'json.json') 324 | 325 | if not os.path.exists(dir_): 326 | if not self.args.play: 327 | os.makedirs(dir_) 328 | self.offset = int(time.time()) 329 | else: 330 | if os.path.exists(json_path): 331 | self.offset = json.load(open(json_path))['offset'] \ 332 | if not self.args.update else int(time.time()) 333 | 334 | self.infos['dir_'] = dir_ 335 | self.json_path = json_path 336 | subdir = os.path.join(dir_, target_type) 337 | if not os.path.exists(subdir) and not self.args.play: 338 | os.makedirs(subdir) 339 | 340 | if not self.args.play: 341 | for fl in os.listdir(subdir): 342 | if not fl.endswith('.tmp'): 343 | COMPLETION.value += 1 344 | else: 345 | UNCOMPLETION.value += 1 346 | 347 | if self.args.offset: 348 | self.offset = self.args.offset 349 | 350 | print s % (1, 92, '## begin:'), 'offset = %s,' % self.offset, base_hostname 351 | print s % (1, 97, 'INFO:\n') + \ 352 | 'D = Downloads, R = 
Repair_Need\n' + \ 353 | 'C = Completion, NE = Net_Errors, O = Offset' 354 | 355 | def download_photos_by_offset(self, base_hostname, post_id): 356 | self.init_infos(base_hostname, 'photos') 357 | 358 | def do(): 359 | items = self._photo( 360 | base_hostname, offset=self.offset if not post_id else '', post_id=post_id) 361 | if not items: 362 | return [] 363 | self.offset += 20 364 | self.save_json() 365 | return items 366 | return do 367 | 368 | def download_photos_by_tag(self, base_hostname, tag): 369 | self.init_infos(base_hostname, 'photos', tag=tag) 370 | 371 | def do(): 372 | items = self._photo(base_hostname, tag=tag, before=self.offset) 373 | if not items: 374 | return [] 375 | self.offset = items[-1]['timestamp'] 376 | self.save_json() 377 | return items 378 | return do 379 | 380 | def download_videos_by_offset(self, base_hostname, post_id): 381 | self.init_infos(base_hostname, 'videos') 382 | 383 | def do(): 384 | items = self._video( 385 | base_hostname, offset=self.offset, post_id=post_id) 386 | if not items: 387 | return [] 388 | self.offset += 20 389 | if not self.args.play: 390 | self.save_json() 391 | return items 392 | return do 393 | 394 | def download_videos_by_tag(self, base_hostname, tag): 395 | self.init_infos(base_hostname, 'videos', tag) 396 | 397 | def do(): 398 | items = self._video( 399 | base_hostname, before=self.offset, tag=tag) 400 | if not items: 401 | return [] 402 | self.offset = items[-1]['timestamp'] 403 | if not self.args.play: 404 | self.save_json() 405 | return items 406 | return do 407 | 408 | def download_audios_by_offset(self, base_hostname, post_id): 409 | self.init_infos(base_hostname, 'audios') 410 | 411 | def do(): 412 | items = self._audio( 413 | base_hostname, offset=self.offset if not post_id else '', post_id=post_id) 414 | if not items: 415 | return [] 416 | self.offset += 20 417 | if not self.args.play: 418 | self.save_json() 419 | return items 420 | return do 421 | 422 | def download_audios_by_tag(self, 
base_hostname, tag): 423 | self.init_infos(base_hostname, 'audios', tag) 424 | 425 | def do(): 426 | items = self._audio( 427 | base_hostname, before=self.offset, tag=tag) 428 | if not self.infos['items']: 429 | return [] 430 | self.offset = self.infos['items'][-1]['timestamp'] 431 | if not self.args.play: 432 | self.save_json() 433 | return items 434 | return do 435 | 436 | def download_photos(self, base_hostname, post_id='', tag=''): 437 | if tag: 438 | return self.download_photos_by_tag(base_hostname, tag) 439 | else: 440 | return self.download_photos_by_offset(base_hostname, post_id=post_id) 441 | 442 | def download_videos(self, base_hostname, post_id='', tag=''): 443 | if tag: 444 | return self.download_videos_by_tag(base_hostname, tag) 445 | else: 446 | return self.download_videos_by_offset(base_hostname, post_id=post_id) 447 | 448 | def download_audios(self, base_hostname, post_id='', tag=''): 449 | if tag: 450 | return self.download_audios_by_tag(base_hostname, tag) 451 | else: 452 | return self.download_audios_by_offset(base_hostname, post_id=post_id) 453 | 454 | def fix_photos(self, base_hostname): 455 | self.init_infos(base_hostname, 'photos') 456 | 457 | t = os.listdir(os.path.join(self.infos['dir_'], 'photos')) 458 | t = [i[:i.find('_')] for i in t if i.endswith('.tmp')] 459 | self.post_ids = list(set(t)) 460 | 461 | def do(): 462 | if len(self.post_ids): 463 | post_id = self.post_ids.pop() 464 | return self._photo(base_hostname, post_id=post_id) 465 | else: 466 | return [] 467 | return do 468 | 469 | def parse_urls(self, url): 470 | _mod = re.search(r'(http://|https://|)(?P.+\.tumblr.com)', url) 471 | if not _mod: 472 | print s % (1, 91, '[Error]:'), 'url is illegal.', '\n' + url.decode('utf8', 'ignore') 473 | return lambda: [] 474 | base_hostname = _mod.group('hostname') 475 | if self.args.check: 476 | return self.fix_photos(base_hostname) 477 | 478 | if re.search(r'post/(\d+)', url): 479 | post_id = re.search(r'post/(\d+)', url).group(1) 480 | else: 
481 | post_id = '' 482 | 483 | if self.args.video: 484 | return self.download_videos(base_hostname, post_id=post_id, tag=self.args.tag) 485 | elif self.args.audio: 486 | return self.download_audios(base_hostname, post_id=post_id, tag=self.args.tag) 487 | else: 488 | return self.download_photos(base_hostname, post_id=post_id, tag=self.args.tag) 489 | 490 | def get_item_generator(self): 491 | OFFSET.value = self.offset 492 | items = self.make_items() 493 | for item in items: 494 | item['dir_'] = self.infos['dir_'] 495 | return items 496 | 497 | def args_handler(argv): 498 | p = argparse.ArgumentParser( 499 | description='download from tumblr.com') 500 | p.add_argument('xxx', type=str, nargs='*', help='命令对象.') 501 | p.add_argument('-p', '--processes', action='store', type=int, default=10, 502 | help='指定多进程数,默认为10个,最多为20个 eg: -p 20') 503 | p.add_argument('-f', '--offset', action='store', type=int, default=0, 504 | help='offset') 505 | p.add_argument('-q', '--quiet', action='store_true', 506 | help='quiet') 507 | p.add_argument('-c', '--check', action='store_true', 508 | help='尝试修复未下载成功的图片') 509 | p.add_argument('-P', '--play', action='store_true', 510 | help='play with mpv') 511 | p.add_argument('-V', '--video', action='store_true', 512 | help='download videos') 513 | p.add_argument('-A', '--audio', action='store_true', 514 | help='download audios') 515 | p.add_argument('-t', '--tag', action='store', 516 | default=None, type=str, 517 | help='下载特定tag的图片, eg: -t beautiful') 518 | p.add_argument('--update', action='store_true', 519 | help='update new things') 520 | p.add_argument('--redownload', action='store_true', 521 | help='redownload all things') 522 | p.add_argument('-x', '--proxy', type=str, 523 | help='redownload all things') 524 | args = p.parse_args(argv[1:]) 525 | xxx = args.xxx 526 | 527 | if args.proxy: 528 | if args.proxy[:4] not in ('http', 'sock'): 529 | print s % (1, 91, '[Error]:'), 'proxy must have a protocol:// prefix' 530 | sys.exit(1) 531 | else: 532 
| global PROXY 533 | PROXY = args.proxy 534 | 535 | if args.redownload: args.update = True 536 | return args, xxx 537 | 538 | def print_msg(check): 539 | time.sleep(2) # initial interval 540 | 541 | while True: 542 | msg = "\r%s, %s, %s, %s, %s " % \ 543 | ( 544 | 'D: ' + s % (1, 92, DOWNLOADS.value), 545 | 'R: ' + s % (1, 93, UNCOMPLETION.value \ 546 | if not check \ 547 | else UNCOMPLETION.value - DOWNLOAD_ERRORS.value - DOWNLOADS.value), 548 | 'C: ' + s % (1, 97, COMPLETION.value + DOWNLOADS.value), 549 | 'NE: ' + s % (1, 91, NET_ERRORS.value), 550 | 'O: %s' % OFFSET.value 551 | ) 552 | sys.stdout.write(msg) 553 | sys.stdout.flush() 554 | time.sleep(2) 555 | 556 | def sighandler(signum, frame): 557 | # print s % (1, 91, "\n !! Signal:"), signum 558 | # print s % (1, 91, " !! Frame: %s" % frame) 559 | sys.exit() 560 | 561 | def handle_signal(): 562 | signal.signal(signal.SIGBUS, sighandler) 563 | signal.signal(signal.SIGHUP, sighandler) 564 | # http://stackoverflow.com/questions/14207708/ioerror-errno-32-broken-pipe-python 565 | signal.signal(signal.SIGPIPE, signal.SIG_DFL) 566 | signal.signal(signal.SIGQUIT, sighandler) 567 | signal.signal(signal.SIGSYS, sighandler) 568 | 569 | signal.signal(signal.SIGABRT, sighandler) 570 | signal.signal(signal.SIGFPE, sighandler) 571 | signal.signal(signal.SIGILL, sighandler) 572 | signal.signal(signal.SIGINT, sighandler) 573 | signal.signal(signal.SIGSEGV, sighandler) 574 | signal.signal(signal.SIGTERM, sighandler) 575 | 576 | def main(argv): 577 | handle_signal() 578 | args, xxx = args_handler(argv) 579 | 580 | if args.play: 581 | play(xxx, args) 582 | 583 | lock = multiprocessing.Lock() 584 | queue = multiprocessing.JoinableQueue(maxsize=args.processes) 585 | thrs = [] 586 | for i in range(args.processes): 587 | thr = Downloader(queue, lock) 588 | thr.start() 589 | thrs.append(thr) 590 | 591 | # massage thread 592 | msg_thr = multiprocessing.Process(target=print_msg, args=(args.check,)) 593 | msg_thr.daemon = True 594 | 
msg_thr.start() 595 | 596 | for url in xxx: 597 | reset_statistic_params() 598 | tumblr = Tumblr(args, url) 599 | not_add = 0 600 | while True: 601 | items = tumblr.get_item_generator() 602 | if not items: 603 | break 604 | 605 | # Check the downloaded items. 606 | # It will be exited, if there is no new item to download 607 | # in 5 loops, unless with --redownload 608 | remove_downloaded_items(items) 609 | if not args.redownload: 610 | if not items: 611 | not_add += 1 612 | if not_add > 5: 613 | print s % (1, 93, '\n[Warning]:'), \ 614 | 'There is nothing new to download in 5 loops.\n', \ 615 | 'If you want to scan all resources, using --redownload\n' \ 616 | 'or running the script again to next 5 loops.' 617 | break 618 | continue 619 | else: 620 | not_add = 0 621 | 622 | for item in items: 623 | queue.put(item) 624 | 625 | while not queue.empty(): 626 | time.sleep(2) 627 | 628 | for i in range(args.processes): 629 | queue.put(None) 630 | 631 | queue.join() 632 | 633 | for thr in thrs: 634 | thr.join() 635 | 636 | msg_thr.terminate() 637 | 638 | if __name__ == '__main__': 639 | argv = sys.argv 640 | main(argv) 641 | -------------------------------------------------------------------------------- /unzip.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # -*- coding: utf-8 -*- 3 | 4 | import os 5 | import sys 6 | import zipfile 7 | import argparse 8 | 9 | s = '\x1b[%d;%dm%s\x1b[0m' # terminual color template 10 | 11 | def unzip(path): 12 | 13 | file = zipfile.ZipFile(path,"r") 14 | if args.secret: 15 | file.setpassword(args.secret) 16 | 17 | for name in file.namelist(): 18 | try: 19 | utf8name=name.decode('gbk') 20 | pathname = os.path.dirname(utf8name) 21 | except: 22 | utf8name=name 23 | pathname = os.path.dirname(utf8name) 24 | 25 | #print s % (1, 92, ' >> extracting:'), utf8name 26 | #pathname = os.path.dirname(utf8name) 27 | if not os.path.exists(pathname) and pathname != "": 28 | 
os.makedirs(pathname) 29 | data = file.read(name) 30 | if not os.path.exists(utf8name): 31 | try: 32 | fo = open(utf8name, "w") 33 | fo.write(data) 34 | fo.close 35 | except: 36 | pass 37 | file.close() 38 | 39 | def main(argv): 40 | ###################################################### 41 | # for argparse 42 | p = argparse.ArgumentParser(description='解决unzip乱码') 43 | p.add_argument('xxx', type=str, nargs='*', \ 44 | help='命令对象.') 45 | p.add_argument('-s', '--secret', action='store', \ 46 | default=None, help='密码') 47 | global args 48 | args = p.parse_args(argv[1:]) 49 | xxx = args.xxx 50 | 51 | for path in xxx: 52 | if path.endswith('.zip'): 53 | if os.path.exists(path): 54 | print s % (1, 97, ' ++ unzip:'), path 55 | unzip(path) 56 | else: 57 | print s % (1, 91, ' !! file doesn\'t exist.'), path 58 | else: 59 | print s % (1, 91, ' !! file isn\'t a zip file.'), path 60 | 61 | if __name__ == '__main__': 62 | argv = sys.argv 63 | main(argv) 64 | -------------------------------------------------------------------------------- /yunpan.360.cn.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | # vim: set fileencoding=utf8 3 | 4 | import os 5 | import sys 6 | from getpass import getpass 7 | import requests 8 | import urllib 9 | import json 10 | import re 11 | import time 12 | import argparse 13 | import random 14 | import md5 15 | 16 | ############################################################ 17 | # wget exit status 18 | wget_es = { 19 | 0: "No problems occurred.", 20 | 2: "User interference.", 21 | 1<<8: "Generic error code.", 22 | 2<<8: "Parse error - for instance, when parsing command-line " \ 23 | "optio.wgetrc or .netrc...", 24 | 3<<8: "File I/O error.", 25 | 4<<8: "Network failure.", 26 | 5<<8: "SSL verification failure.", 27 | 6<<8: "Username/password authentication failure.", 28 | 7<<8: "Protocol errors.", 29 | 8<<8: "Server issued an error response." 
30 | } 31 | ############################################################ 32 | 33 | s = '\x1b[%d;%dm%s\x1b[0m' # terminual color template 34 | 35 | cookie_file = os.path.join(os.path.expanduser('~'), '.360.cookies') 36 | 37 | headers = { 38 | "Accept":"text/html,application/xhtml+xml,application/xml; " \ 39 | "q=0.9,image/webp,*/*;q=0.8", 40 | "Accept-Encoding":"text/html", 41 | "Accept-Language":"en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4,zh-TW;q=0.2", 42 | "Content-Type":"application/x-www-form-urlencoded", 43 | "Referer":"http://yunpan.360.cn/", 44 | "X-Requested-With":"XMLHttpRequest", 45 | "User-Agent":"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 "\ 46 | "(KHTML, like Gecko) Chrome/32.0.1700.77 Safari/537.36" 47 | } 48 | 49 | ss = requests.session() 50 | ss.headers.update(headers) 51 | 52 | class yunpan360(object): 53 | def init(self): 54 | if os.path.exists(cookie_file): 55 | try: 56 | t = json.loads(open(cookie_file).read()) 57 | ss.cookies.update(t.get('cookies', t)) 58 | if not self.check_login(): 59 | print s % (1, 91, ' !! cookie is invalid, please login\n') 60 | sys.exit(1) 61 | except: 62 | g = open(cookie_file, 'w') 63 | g.close() 64 | print s % (1, 97, ' please login') 65 | sys.exit(1) 66 | else: 67 | print s % (1, 91, ' !! 
cookie_file is missing, please login') 68 | sys.exit(1) 69 | 70 | def get_path(self, url): 71 | url = urllib.unquote_plus(url) 72 | f = re.search(r'#(.+?)(&|$)', url) 73 | if f: 74 | return f.group(1) 75 | else: 76 | return '/' 77 | 78 | def check_login(self): 79 | #print s % (1, 97, '\n -- check_login') 80 | url = 'http://yunpan.360.cn/user/login?st=774' 81 | r = ss.get(url) 82 | self.save_cookies() 83 | 84 | if r.ok: 85 | #print s % (1, 92, ' -- check_login success\n') 86 | 87 | # get apihost 88 | self.apihost = re.search(r'http://(.+?)/', r.url).group(1).encode('utf8') 89 | self.save_cookies() 90 | return True 91 | else: 92 | print s % (1, 91, ' -- check_login fail\n') 93 | return False 94 | 95 | def login(self, username, password): 96 | print s % (1, 97, '\n -- login') 97 | 98 | # get token 99 | params = { 100 | "o": "sso", 101 | "m": "getToken", 102 | "func": "QHPass.loginUtils.tokenCallback", 103 | "userName": username, 104 | "rand": random.random() 105 | } 106 | url = 'https://login.360.cn' 107 | r = ss.get(url, params=params) 108 | token = re.search(r'token":"(.+?)"', r.content).group(1) 109 | 110 | # now loin 111 | params = { 112 | "o": "sso", 113 | "m": "login", 114 | "requestScema": "http", 115 | "from": "pcw_cloud", 116 | "rtype": "data", 117 | "func": "QHPass.loginUtils.loginCallback", 118 | "userName": username, 119 | "pwdmethod": 1, 120 | "isKeepAlive": 0, 121 | "token": token, 122 | "captFlag": 1, 123 | "captId": "i360", 124 | "captCode": "", 125 | "lm": 0, 126 | "validatelm": 0, 127 | "password": md5.new(password).hexdigest(), 128 | "r": int(time.time()*1000) 129 | } 130 | url = 'https://login.360.cn' 131 | ss.get(url, params=params) 132 | self.save_cookies() 133 | 134 | def save_cookies(self): 135 | with open(cookie_file, 'w') as g: 136 | c = {'cookies': ss.cookies.get_dict()} 137 | g.write(json.dumps(c, indent=4, sort_keys=True)) 138 | 139 | def get_dlink(self, i): 140 | data = 'nid=%s&fname=%s&' % (i['nid'].encode('utf8'), \ 141 | 
urllib.quote_plus(i['path'].encode('utf8'))) 142 | apiurl = 'http://%s/file/download' % self.apihost 143 | r = ss.post(apiurl, data=data) 144 | j = r.json() 145 | if j['errno'] == 0: 146 | dlink = j['data']['download_url'].encode('utf8') 147 | return dlink 148 | 149 | def fix_json(self, ori): 150 | # 万恶的 360,返回的json尽然不合法。 151 | jdata = re.search(r'data:\s*\[.+?\]', ori).group() 152 | jlist = re.split(r'\}\s*,\s*\{', jdata) 153 | jlist = [l for l in jlist if l.strip()] 154 | j = [] 155 | for item in jlist: 156 | nid = re.search(r',nid: \'(\d+)\'', item) 157 | path = re.search(r',path: \'(.+?)\',nid', item) 158 | name = re.search(r'oriName: \'(.+?)\',path', item) 159 | isdir = 'isDir: ' in item 160 | if nid: 161 | t = { 162 | 'nid': nid.group(1), 163 | 'path': path.group(1).replace("\\'", "'"), 164 | 'name': name.group(1).replace("\\'", "'"), 165 | 'isdir': 1 if isdir else 0 166 | } 167 | j.append(t) 168 | return j 169 | 170 | def get_infos(self): 171 | apiurl = 'http://%s/file/list' % self.apihost 172 | data = "type" + "=2" + "&" \ 173 | "t" + "=%s" % random.random() + "&" \ 174 | "order" + "=asc" + "&" \ 175 | "field" + "=file_name" + "&" \ 176 | "path" + "=%s" + "&" \ 177 | "page" + "=0" + "&" \ 178 | "page_size" + "=10000" + "&" \ 179 | "ajax" + "=1" 180 | 181 | dir_loop = [self.path] 182 | base_dir = os.path.split(self.path[:-1])[0] if self.path[-1] == '/' \ 183 | and self.path != '/' else os.path.split(self.path)[0] 184 | for d in dir_loop: 185 | data = data % urllib.quote_plus(d) 186 | r = ss.post(apiurl, data=data) 187 | j = self.fix_json(r.text.strip()) 188 | if j: 189 | if args.type_: 190 | j = [x for x in j if x['isdir'] \ 191 | or x['name'][-len(args.type_):] \ 192 | == unicode(args.type_)] 193 | total_file = len([i for i in j if not i['isdir']]) 194 | if args.from_ - 1: 195 | j = j[args.from_-1:] if args.from_ else j 196 | nn = args.from_ 197 | for i in j: 198 | if i['isdir']: 199 | dir_loop.append(i['path'].encode('utf8')) 200 | else: 201 | t = 
i['path'].encode('utf8') 202 | t = t.replace(base_dir, '') 203 | t = t[1:] if t[0] == '/' else t 204 | t = os.path.join(os.getcwd(), t) 205 | infos = { 206 | 'file': t, 207 | 'dir_': os.path.split(t)[0], 208 | 'dlink': self.get_dlink(i), 209 | 'name': i['name'].encode('utf8'), 210 | 'apihost': self.apihost, 211 | 'nn': nn, 212 | 'total_file': total_file 213 | } 214 | nn += 1 215 | self.download(infos) 216 | else: 217 | print s % (1, 91, ' error: get_infos') 218 | sys.exit(0) 219 | 220 | @staticmethod 221 | def download(infos): 222 | #### !!!! 注意:360不支持断点续传 223 | 224 | ## make dirs 225 | if not os.path.exists(infos['dir_']): 226 | os.makedirs(infos['dir_']) 227 | else: 228 | if os.path.exists(infos['file']): 229 | return 0 230 | 231 | num = random.randint(0, 7) % 8 232 | col = s % (2, num + 90, infos['file']) 233 | infos['nn'] = infos['nn'] if infos.get('nn') else 1 234 | infos['total_file'] = infos['total_file'] if infos.get('total_file') else 1 235 | print '\n ++ 正在下载: #', s % (1, 97, infos['nn']), '/', s % (1, 97, infos['total_file']), '#', col 236 | 237 | cookie = '; '.join(['%s=%s' % (x, y) for x, y in ss.cookies.items()]).encode('utf8') 238 | if args.aria2c: 239 | if args.limit: 240 | cmd = 'aria2c -c -s10 -x10 ' \ 241 | '--max-download-limit %s ' \ 242 | '-o "%s.tmp" -d "%s" ' \ 243 | '--user-agent "%s" ' \ 244 | '--header "Cookie:%s" ' \ 245 | '--header "Referer:http://%s/" "%s"' \ 246 | % (args.limit, infos['name'], infos['dir_'],\ 247 | headers['User-Agent'], cookie, infos['apihost'], infos['dlink']) 248 | else: 249 | cmd = 'aria2c -c -s10 -x10 ' \ 250 | '-o "%s.tmp" -d "%s" --user-agent "%s" ' \ 251 | '--header "Cookie:%s" ' \ 252 | '--header "Referer:http://%s/" "%s"' \ 253 | % (infos['name'], infos['dir_'], headers['User-Agent'], \ 254 | cookie, infos['apihost'], infos['dlink']) 255 | else: 256 | if args.limit: 257 | cmd = 'wget -c --limit-rate %s ' \ 258 | '-O "%s.tmp" --user-agent "%s" ' \ 259 | '--header "Cookie:%s" ' \ 260 | '--header 
"Referer:http://%s/" "%s"' \ 261 | % (args.limit, infos['file'], headers['User-Agent'], \ 262 | cookie, infos['apihost'], infos['dlink']) 263 | else: 264 | cmd = 'wget -c -O "%s.tmp" --user-agent "%s" ' \ 265 | '--header "Cookie:%s" ' \ 266 | '--header "Referer:http://%s/" "%s"' \ 267 | % (infos['file'], headers['User-Agent'], \ 268 | cookie, infos['apihost'], infos['dlink']) 269 | 270 | status = os.system(cmd) 271 | if status != 0: # other http-errors, such as 302. 272 | wget_exit_status_info = wget_es[status] 273 | print('\n\n ---### \x1b[1;91mERROR\x1b[0m ==> '\ 274 | '\x1b[1;91m%d (%s)\x1b[0m ###--- \n\n' \ 275 | % (status, wget_exit_status_info)) 276 | print s % (1, 91, ' ===> '), cmd 277 | sys.exit(1) 278 | else: 279 | os.rename('%s.tmp' % infos['file'], infos['file']) 280 | 281 | def exists(self, filepath): 282 | pass 283 | 284 | def upload(self, path, dir_): 285 | pass 286 | 287 | def addtask(self): 288 | pass 289 | 290 | def do(self): 291 | self.get_infos() 292 | 293 | def main(argv): 294 | if len(argv) <= 1: 295 | sys.exit() 296 | 297 | ###################################################### 298 | # for argparse 299 | p = argparse.ArgumentParser(description='download from yunpan.360.com') 300 | p.add_argument('xxx', type=str, nargs='*', \ 301 | help='命令对象.') 302 | p.add_argument('-a', '--aria2c', action='store_true', \ 303 | help='download with aria2c') 304 | p.add_argument('-p', '--play', action='store_true', \ 305 | help='play with mpv') 306 | p.add_argument('-f', '--from_', action='store', \ 307 | default=1, type=int, \ 308 | help='从第几个开始下载,eg: -f 42') 309 | p.add_argument('-t', '--type_', action='store', \ 310 | default=None, type=str, \ 311 | help='要下载的文件的后缀,eg: -t mp3') 312 | p.add_argument('-l', '--limit', action='store', \ 313 | default=None, type=str, help='下载速度限制,eg: -l 100k') 314 | global args 315 | args = p.parse_args(argv[1:]) 316 | xxx = args.xxx 317 | 318 | if xxx[0] == 'login' or xxx[0] == 'g': 319 | if len(xxx[1:]) < 1: 320 | username = 
raw_input(s % (1, 97, ' username: ')) 321 | password = getpass(s % (1, 97, ' password: ')) 322 | elif len(xxx[1:]) == 1: 323 | username = xxx[1] 324 | password = getpass(s % (1, 97, ' password: ')) 325 | elif len(xxx[1:]) == 2: 326 | username = xxx[1] 327 | password = xxx[2] 328 | else: 329 | print s % (1, 91, ' login\n login username\n login username password') 330 | 331 | x = yunpan360() 332 | x.login(username, password) 333 | is_signin = x.check_login() 334 | if is_signin: 335 | print s % (1, 92, ' ++ login succeeds.') 336 | else: 337 | print s % (1, 91, ' login failes') 338 | 339 | elif xxx[0] == 'signout': 340 | g = open(cookie_file, 'w') 341 | g.close() 342 | 343 | else: 344 | urls = xxx 345 | x = yunpan360() 346 | x.init() 347 | for url in urls: 348 | x.path = x.get_path(url) 349 | x.do() 350 | 351 | if __name__ == '__main__': 352 | argv = sys.argv 353 | main(argv) 354 | --------------------------------------------------------------------------------