├── .gitignore
├── README.md
├── config.example.json
├── index.py
├── packages
│   ├── beautifulsoup4-4.11.1-py3-none-any.whl
│   ├── comtypes-1.1.11-py2.py3-none-any.whl
│   └── requests-2.28.1-py3-none-any.whl
├── requirements.txt
├── toIdm.py
└── tools
    ├── __init__.py
    ├── const.py
    ├── get_name.py
    └── tool.py
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | .idea/
12 | build/
13 | develop-eggs/
14 | dist/
15 | downloads/
16 | eggs/
17 | .eggs/
18 | lib/
19 | lib64/
20 | parts/
21 | sdist/
22 | var/
23 | wheels/
24 | pip-wheel-metadata/
25 | share/python-wheels/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 | MANIFEST
30 | plugins/
31 |
32 | # PyInstaller
33 | # Usually these files are written by a python script from a template
34 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
35 | *.manifest
36 | *.spec
37 |
38 | # Installer logs
39 | pip-log.txt
40 | pip-delete-this-directory.txt
41 |
42 | # Unit test / coverage reports
43 | htmlcov/
44 | .tox/
45 | .nox/
46 | .coverage
47 | .coverage.*
48 | .cache
49 | nosetests.xml
50 | coverage.xml
51 | *.cover
52 | *.py,cover
53 | .hypothesis/
54 | .pytest_cache/
55 |
56 | # Translations
57 | *.mo
58 | *.pot
59 |
60 | # Django stuff:
61 | *.log
62 | local_settings.py
63 | db.sqlite3
64 | db.sqlite3-journal
65 |
66 | # Flask stuff:
67 | instance/
68 | .webassets-cache
69 |
70 | # Scrapy stuff:
71 | .scrapy
72 |
73 | # Sphinx documentation
74 | docs/_build/
75 |
76 | # PyBuilder
77 | target/
78 |
79 | # Jupyter Notebook
80 | .ipynb_checkpoints
81 |
82 | # IPython
83 | profile_default/
84 | ipython_config.py
85 |
86 | # pyenv
87 | .python-version
88 |
89 | # pipenv
90 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
91 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
92 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
93 | # install all needed dependencies.
94 | #Pipfile.lock
95 |
96 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
97 | __pypackages__/
98 |
99 | # Celery stuff
100 | celerybeat-schedule
101 | celerybeat.pid
102 |
103 | # SageMath parsed files
104 | *.sage.py
105 |
106 | # Environments
107 | .env
108 | .venv
109 | env/
110 | venv/
111 | ENV/
112 | env.bak/
113 | venv.bak/
114 |
115 | # Spyder project settings
116 | .spyderproject
117 | .spyproject
118 |
119 | # Rope project settings
120 | .ropeproject
121 |
122 | # mkdocs documentation
123 | /site
124 |
125 | # mypy
126 | .mypy_cache/
127 | .dmypy.json
128 | dmypy.json
129 |
130 | # Pyre type checker
131 | .pyre/
132 |
133 | # config
134 | *.json
135 | !*.example.*
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Resolve Real Download Links for Pay-per-Download File Hosts
2 |
3 | Uses a third-party resolver site to obtain the real download address from pay-per-download ("网赚盘") file hosts and push it to aria2 or IDM.
4 |
5 | | Supported hosts | | |
6 | |-------|--------|---------|
7 | | 飞猫云√ | 567√ | 库云√ |
8 | | EXP√ | 茉香 | 火箭云 |
9 | | 雪球云盘√ | 先锋云√ | DUFILE√ |
10 | | 星耀云盘√ | 贵族√ | Rose√ |
11 | | 520 | 46 | 77 |
12 | | RAR√ | OWN√ | 蜜蜂 |
13 | | 飞鱼盘√ | YIFILE | 台湾云 |
14 | | SKY√ | | |
15 |
16 | **A √ after a host name means its file names can be resolved automatically.**
17 |
18 | _To request automatic file-name resolution for another host, open an issue titled with the host's name and include a share link to a file on that host._
19 |
20 | Resolver site:
21 |
22 | Card-key purchase link:
23 |
24 | Thanks to @Eric Brown for the IDM invocation code
25 | ([tutorial](https://stackoverflow.com/questions/22587681/use-idminternet-download-manager-api-with-python)).
26 |
27 | ## Usage
28 |
29 | 1. Download this project
30 | 2. Rename config.example.json to config.json
31 | 3. Fill in the config file [guide](#configuration-reference)
32 | 4. Install the dependencies [guide](#installing-the-dependencies) (the whole flow is sketched below)
33 |
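Concretely, a minimal end-to-end sketch of the steps above (the repository URL is taken from the contributors page below; on Windows, use `copy` instead of `cp`):

```commandline
git clone https://github.com/holll/wzjx.git
cd wzjx
cp config.example.json config.json
pip3 install -r requirements.txt
python3 index.py
```

index.py also accepts an optional first argument pointing to a different config file, e.g. `python3 index.py /path/to/config.json`.
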
34 | ## Configuration Reference
35 |
36 | ```json
37 | {
38 |   "card": "your card key",
39 |   "aria2_rpc": "leave empty if downloading with IDM",
40 |   "auto_name": "false or true; enabling it helps resolve the correct file name but slows down parsing",
41 |   "aria2_token": "leave empty if downloading with IDM",
42 |   "download_path": "download directory",
43 |   "proxies": "a proxy address (http://127.0.0.1:7890) or empty"
44 | }
45 | ```
46 |
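For reference, a filled-in config.json for the aria2 route might look like this (all values are illustrative; `http://127.0.0.1:6800/jsonrpc` is aria2's default RPC endpoint and the card key is a placeholder). Leave `aria2_rpc` and `aria2_token` empty to use IDM instead:

```json
{
  "card": "your-card-key-here",
  "auto_name": "true",
  "aria2_rpc": "http://127.0.0.1:6800/jsonrpc",
  "aria2_token": "my-aria2-secret",
  "download_path": "/home/user/Downloads",
  "proxies": ""
}
```
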
47 | ## Installing the Dependencies
48 |
49 | Run `pip3 install -r requirements.txt`
50 |
51 | or install the bundled wheels one at a time:
52 |
53 | ```commandline
54 | pip3 install packages/beautifulsoup4-4.11.1-py3-none-any.whl
55 | pip3 install packages/comtypes-1.1.11-py2.py3-none-any.whl
56 | pip3 install packages/requests-2.28.1-py3-none-any.whl
57 | ```
58 |
59 | ## Notes
60 | 
61 | * The code is not guaranteed to keep working long-term
62 | * The resolver site and the Taobao shop are third parties unrelated to the author, and are not guaranteed to stay up
63 | * Given the above, choose your card-key duration carefully before buying
64 |
65 |
66 | ## Contributors
67 |
68 | > Thanks to everyone who has helped make this project better!
69 |
70 | [Contributors](https://github.com/holll/wzjx/graphs/contributors)
71 |
72 | ## Star History
73 |
--------------------------------------------------------------------------------
/config.example.json:
--------------------------------------------------------------------------------
1 | {
2 | "card": "",
3 | "auto_name": "",
4 | "aria2_rpc": "",
5 | "aria2_token": "",
6 |   "download_path": "",
7 |   "proxies": ""
8 | }
--------------------------------------------------------------------------------
/index.py:
--------------------------------------------------------------------------------
1 | import asyncio
2 | import base64
3 | import hashlib
4 | import json
5 | import os
6 | import platform
7 | import re
8 | import sys
9 |
10 | import tools.tool as tool
11 | from tools import const, get_name
12 |
13 | if platform.system() == 'Windows':
14 | import toIdm
15 | import pyperclip
16 |
17 | config_path = './config.json'
18 | s = tool.myRequests()
19 |
20 |
21 | def init():
22 | with open(config_path, 'r', encoding='utf-8') as f:
23 | config = json.load(f)
24 | for key in config:
25 | os.environ[key] = config[key]
26 |     print(f'Config initialized. Key settings (auto file name: {os.getenv("auto_name")})')
27 |     print(f'Card key: {os.environ["card"]}\nRPC address: {os.environ["aria2_rpc"]}')
28 |     print(f'aria2_token: {config.get("aria2_token")}\nDownload path: {config.get("download_path")}')
29 | sys.stdout.flush()
30 |
31 |
32 | def download(url, referer, name, is_xc: str):
33 | def downl_idm(url, referer, name):
34 | toIdm.download(url, os.environ['download_path'], name, referer)
35 |
36 | def downl_aria2(url, referer, name):
37 | RPC_url = os.environ['aria2_rpc']
38 | json_rpc = json.dumps({
39 | 'id': hashlib.md5(url.encode(encoding='UTF-8')).hexdigest(),
40 | 'jsonrpc': '2.0',
41 | 'method': 'aria2.addUri',
42 | 'params': [
43 | f'token:{os.environ["aria2_token"]}',
44 | [url],
45 | {'dir': os.environ['download_path'], 'out': name, 'referer': referer}]
46 | })
47 | try:
48 | response = s.post(url=RPC_url, data=json_rpc)
49 | if response.status_code == 200:
50 |                 print(f'Download task {name} added successfully\n', flush=True)
51 |             else:
52 |                 print(f'Failed to create download task {name}', flush=True)
53 |                 print(f'Resume code: XC://{xc_ma}', flush=True)
54 |                 if platform.system() == 'Windows':
55 |                     pyperclip.copy(f'XC://{xc_ma}')
56 |                     print('Resume code copied to the clipboard', flush=True)
57 |         except Exception as e:
58 |             print(f'Failed to add the task; error: {e.__class__.__name__}', flush=True)
59 |             print(f'Resume code: XC://{xc_ma}', flush=True)
60 |             if platform.system() == 'Windows':
61 |                 pyperclip.copy(f'XC://{xc_ma}')
62 |                 print('Resume code copied to the clipboard', flush=True)
63 |
64 |     if is_xc != '':  # a resume code 'XC://<base64>' decodes to 'url###referer###name'
65 | xc_ma = is_xc.replace('XC://', '')
66 | tmp_data = base64.b64decode(xc_ma).decode().split('###')
67 | url = tmp_data[0]
68 | referer = tmp_data[1]
69 | name = tmp_data[2]
70 | else:
71 | xc_ma = base64.b64encode(f'{url}###{referer}###{name}'.encode()).decode()
72 | if os.environ.get('xc') is not None:
73 | print(f'XC://{xc_ma}')
74 | return
75 | if len(os.environ['aria2_rpc']) == 0:
76 | downl_idm(url, referer, name)
77 | else:
78 | downl_aria2(url, referer, name)
79 |
80 |
81 | async def main():
82 | init()
83 | while True:
84 |         url = input('\nEnter a download link or resume code: ')
85 | if 'XC://' in url:
86 | download('', '', '', is_xc=url)
87 | else:
88 | name, return_data = await asyncio.gather(get_name.get_name(url), tool.jiexi(s, url))
89 | if return_data['code'] != 200:
90 | print(return_data['msg'])
91 | continue
92 | down_link = tool.select_link(return_data['links'])
93 | url_domain = re.search(const.domain_reg, down_link).group()
94 |             print(f'Got download link {url_domain}... OK\n{return_data.get("end_time")}; remember to renew in time', flush=True)
95 | download(down_link, name[1], name[0], is_xc='')
96 |
97 |
98 | if __name__ == '__main__':
99 | args = sys.argv
100 | if len(args) == 1:
101 | config_path = './config.json'
102 | else:
103 | config_path = args[1]
104 | if sys.version_info < (3, 7):
105 | asyncio.get_event_loop().run_until_complete(main())
106 | else:
107 | asyncio.run(main())
108 |
--------------------------------------------------------------------------------
/packages/beautifulsoup4-4.11.1-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/holll/wzjx/51582107d5f545b589092aae63b5ababa8459e3f/packages/beautifulsoup4-4.11.1-py3-none-any.whl
--------------------------------------------------------------------------------
/packages/comtypes-1.1.11-py2.py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/holll/wzjx/51582107d5f545b589092aae63b5ababa8459e3f/packages/comtypes-1.1.11-py2.py3-none-any.whl
--------------------------------------------------------------------------------
/packages/requests-2.28.1-py3-none-any.whl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/holll/wzjx/51582107d5f545b589092aae63b5ababa8459e3f/packages/requests-2.28.1-py3-none-any.whl
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | beautifulsoup4>=4.11.1
2 | comtypes>=1.1.11
3 | pyperclip>=1.8.2
4 | requests>=2.26.0
5 | urllib3>=1.26.7
6 |
--------------------------------------------------------------------------------
/toIdm.py:
--------------------------------------------------------------------------------
1 | import comtypes.client as cc
2 |
3 | cc.GetModule(["{ECF21EAB-3AA8-4355-82BE-F777990001DD}", 1, 0])
4 | # cc.GetModule generates a Python wrapper module for IDM's type library (GUID above) and reports the wrapper's name on first run
5 | import comtypes.gen.IDManLib as IDMan
6 |
7 | idm1 = cc.CreateObject("IDMan.CIDMLinkTransmitter", None, None, IDMan.ICIDMLinkTransmitter2)
8 |
9 |
10 | def download(url, path, name=None, referrer=None):
11 |     idm1.SendLinkToIDM(url, referrer, None, None, None, None, path, name, 1)  # flags=1: ask IDM to add the download without its confirmation dialog
12 |
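# Minimal usage sketch (illustrative values only): queue a download in IDM
# with a referrer, saving it as file.zip under C:\Downloads.
#   download('https://example.com/file.zip', r'C:\Downloads', 'file.zip', 'https://example.com/')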
--------------------------------------------------------------------------------
/tools/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/holll/wzjx/51582107d5f545b589092aae63b5ababa8459e3f/tools/__init__.py
--------------------------------------------------------------------------------
/tools/const.py:
--------------------------------------------------------------------------------
1 | white_domain = [
2 | 'rosefile',
3 | 'koalaclouds',
4 | 'koolaayun',
5 | 'feimaoyun',
6 | 'expfile',
7 | 'xfpan',
8 | 'skyfileos'
9 | ]
10 | pan_domain = 'http://haoduopan.cn'
11 | domain_reg = '(http|https)://.+?/'
12 |
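# Illustrative example: domain_reg captures a URL's scheme and host, e.g. with re.search:
#   re.search(domain_reg, 'https://example.com/file/abc.html').group() -> 'https://example.com/'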
--------------------------------------------------------------------------------
/tools/get_name.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 |
4 | import requests
5 | from bs4 import BeautifulSoup
6 |
7 | from tools import const
8 | from tools import tool
9 | from tools.tool import myRequests
10 |
11 |
12 | def iycdn(url) -> str:
13 | s = requests.session()
14 | domain = 'https://' + url.split('/')[-2]
15 | rep = s.get(url)
16 | soup = BeautifulSoup(rep.text, 'html.parser')
17 | renji_js_url = domain + soup.find('script').attrs['src']
18 | renji_js = s.get(renji_js_url).text
19 | 
20 |     # Use regexes to pull the key, value, and verification type
21 |     # out of the site's anti-bot JavaScript
22 | pattern_k = r'key\s*=\s*"([^"]+)"'
23 | pattern_v = r'value\s*=\s*"([^"]+)"'
24 | pattern_t = r'yanzheng_ip.php\?type=([^&]+)'
25 | pattern_yz = r'"(\/.+_yanzheng_ip\.php)'
26 | match_k = re.search(pattern_k, renji_js)
27 | match_v = re.search(pattern_v, renji_js)
28 | match_t = re.search(pattern_t, renji_js)
29 | match_yz = re.search(pattern_yz, renji_js)
30 |
31 |     if match_k and match_v and match_t and match_yz:
32 | params = {
33 | 'type': match_t.group(1),
34 | 'key': match_k.group(1),
35 | 'value': tool.md5_encode(tool.string_to_hex(match_v.group(1))),
36 | }
37 | s.get(domain + match_yz.group(1), params=params)
38 | rep = s.get(url)
39 | soup = BeautifulSoup(rep.text, 'html.parser')
40 | name = re.findall(r'>(.*?)<', soup.find('input', {'id': 'f_html'}).attrs['value'])[0]
41 | return name
42 | else:
43 | return ''
44 |
45 |
46 | def rosefile(url: str):
47 | # eg.https://rosefile.net/pm98zjeu2b/xa754.rar.html
48 | return url.split('/')[-1][:-5]
49 |
50 |
51 | def urlMod1(url: str):
52 | # eg.https://koalaclouds.com/971f6c37836c82fb/xm1901.part1.rar
53 | return url.split('/')[-1]
54 |
55 |
56 | def row_fluid(rep_text: str):
57 | # eg.https://www.567file.com/file-1387363.html
58 | # eg.https://ownfile.net/files/T09mMzQ5ODUx.html
59 | # eg.http://www.feiyupan.com/file-1400.html
60 | # eg.http://www.xunniupan.com/file-2475170.html
61 | # eg.http://www.shanxingyy.com/file-285.html
62 | soup = BeautifulSoup(rep_text, 'html.parser')
63 | return soup.find('div', {'class': 'row-fluid'}).div.h1.text
64 |
65 |
66 | def feimaoyun(url: str):
67 | # eg.https://www.feimaoyun.com/s/398y7f0l
68 |     key = url.split('/')[-1]  # the share code is the last path segment
69 | s = myRequests()
70 | rep = s.post('https://www.feimaoyun.com/index.php/down/new_detailv2', data={'code': key})
71 | if rep.status_code == 200:
72 | return rep.json()['data']['file_name']
73 | else:
74 |         return input('Parsing failed; enter the file name manually: ')
75 |
76 |
77 | def dufile(rep_text: str):
78 | # eg.https://dufile.com/file/0c7184f05ecdce0f.html
79 | soup = BeautifulSoup(rep_text, 'html.parser')
80 | return soup.find('h2', {'class': 'title'}).text.split(' ')[-1]
81 |
82 |
83 | def align_absbottom(rep_text: str):
84 | # eg.http://www.xingyaopan.com/fs/tuqlqxxnyzggaag
85 | # eg.http://www.kufile.net/file/QUExNTM5NDg1.html
86 | # eg.http://www.rarclouds.com/file/QUExNTE5Mjgz.html
87 | soup = BeautifulSoup(rep_text, 'html.parser')
88 | name = soup.find('title').text.split(' - ')[0]
89 | file_type = soup.find('img', {'align': 'absbottom'})['src'].split('/')[-1].split('.')[0]
90 | return f'{name}.{file_type}'
91 |
92 |
93 | def dudujb(rep_text: str):
94 | # eg.https://www.dudujb.com/file-1105754.html
95 | soup = BeautifulSoup(rep_text, 'html.parser')
96 | soup = soup.findAll('input', {'class': 'txtgray'})[-1]['value']
97 | return BeautifulSoup(soup, 'html.parser').text
98 |
99 |
100 | def new_title(url: str):
101 | # eg.http://www.xfpan.cc/file/QUExMzE4MDUx.html
102 | # eg.https://www.skyfileos.com/90ea219698c62ea5
103 | s = myRequests()
104 | rep = s.get(url.replace(r'/file/', r'/down/'), headers={'Referer': url})
105 | if rep.status_code == 200:
106 | soup = BeautifulSoup(rep.text, 'html.parser')
107 | return soup.find('title').text.split(' - ')[0]
108 | else:
109 |         return input('Parsing failed; enter the file name manually: ')
110 |
111 |
112 | def expfile(url: str):
113 | # eg.http://www.expfile.com/file-1464062.html
114 | s = myRequests()
115 | rep = s.get(url.replace('file-', 'down2-'))
116 | if rep.status_code == 200:
117 | soup = BeautifulSoup(rep.text, 'html.parser')
118 | return soup.find('title').text.split(' - ')[0]
119 | else:
120 |         return input(f'Parsing failed; enter the file name manually ({url}): ')
121 |
122 |
123 | def titleMod1(rep_text: str):
124 | # eg.https://www.baigepan.com/s/iU36ven9Wu
125 | # eg.https://www.jisuyp.com/s/a6fm2yePRo
126 | # eg.http://www.qqupload.com/3uj4i
127 | soup = BeautifulSoup(rep_text, 'html.parser')
128 | return soup.find('title').text.split(' - ')[0]
129 |
130 |
131 | async def get_name(url):
132 |     if os.environ['auto_name'] == 'false':
133 |         return input('File name: '), url
134 |     s = myRequests()  # construct only when needed; creating a session contacts the resolver site
135 |
136 |     rep = requests.models.Response  # placeholder; reassigned below before rep.text is read
137 |     # white_domain hosts carry the file name in the URL itself (or need the URL rewritten), so skip the initial GET
138 | if not tool.is_in_list(const.white_domain, url):
139 | rep = s.get(url)
140 |         if rep.status_code not in (200, 301, 302):
141 |             return input('Parsing failed; enter the file name manually: '), url
142 | url = rep.url
143 |         # Some hosts redirect via a <meta http-equiv="refresh"> tag even on a 200 response
144 |         soup = BeautifulSoup(rep.text, 'html.parser')
145 |         meta = soup.find('meta')
146 |         if meta is not None and meta.get('http-equiv') == 'refresh':
147 |             url = re.search(r'[a-zA-Z]+://\S*', meta.get('content')).group()
148 |             rep = s.get(url)
149 |
150 | try:
151 | if 'rosefile' in url:
152 | name = rosefile(url)
153 | elif tool.is_in_list(['koalaclouds', 'koolaayun'], url):
154 | name = urlMod1(url)
155 | elif 'feimaoyun' in url:
156 | name = feimaoyun(url)
157 | elif tool.is_in_list(['567', 'ownfile', 'feiyupan', 'xunniu', 'shanxing'], url.rsplit('/', maxsplit=1)[0]):
158 | name = row_fluid(rep.text)
159 | elif 'dufile' in url:
160 | name = dufile(rep.text)
161 | elif tool.is_in_list(['xingyao', 'xywpan', 'kufile', 'rarclouds'], url):
162 | name = align_absbottom(rep.text)
163 | elif 'dudujb' in url:
164 | name = dudujb(rep.text)
165 | elif tool.is_in_list(['xfpan', 'skyfileos'], url):
166 | name = new_title(url)
167 | elif 'expfile' in url:
168 | name = expfile(url)
169 | elif tool.is_in_list(['baigepan', 'jisuyp', 'qqupload'], url):
170 | name = titleMod1(rep.text)
171 | elif 'iycdn' in url:
172 | name = iycdn(url)
173 | else:
174 |             name = input(f'Automatic file-name parsing is not supported for this host; enter it manually ({url}): ')
175 |         print(f'Got file name: {name}', flush=True)
176 |     except Exception:
177 | return None, url
178 | return name, url
179 |
--------------------------------------------------------------------------------
/tools/tool.py:
--------------------------------------------------------------------------------
1 | import hashlib
2 | import os
3 | import platform
4 | import random
5 | import re
6 | from typing import Union
7 |
8 | import requests
9 | import urllib3
10 | from bs4 import BeautifulSoup
11 | from requests.adapters import HTTPAdapter
12 |
13 | try:
14 | import redis
15 |
16 | hasRedis = True
17 | pool_db1 = redis.ConnectionPool(host='127.0.0.1', port=6379, db=2)
18 | r_l = redis.Redis(connection_pool=pool_db1)
19 | except ModuleNotFoundError:
20 | hasRedis = False
21 | from tools import const
22 |
23 | urllib3.util.timeout.Timeout._validate_timeout = lambda *args: 10 if args[2] != 'total' else None  # monkeypatch: force a 10-second connect/read timeout on every request
24 |
25 |
26 | def string_to_hex(ac_str):  # despite the name, this concatenates decimal char codes (mirrors the resolver's JS)
27 | hex_val = ""
28 | for char in ac_str:
29 | code = ord(char)
30 | hex_val += str(code)
31 | return hex_val
32 |
33 |
34 | # Return the hex MD5 digest of a string
35 | def md5_encode(word):
36 | md5_hash = hashlib.md5(word.encode()).hexdigest()
37 | return md5_hash
38 |
39 |
40 | def is_in_list(arr: list, value: str):
41 | for web in arr:
42 | if web in value:
43 | return True
44 | return False
45 |
46 |
47 | class myRequests:
48 | retries = urllib3.util.retry.Retry(total=3, backoff_factor=0.1)
49 | session = requests.session()
50 | user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36'
51 | post_uri = ''
52 |
53 | def __init__(self, headers: Union[None, dict] = None):
54 | self.session.headers = {
55 | 'user-agent': myRequests.user_agent,
56 | }
57 | if headers is not None:
58 |             for k, v in headers.items():
59 | self.session.headers[k] = v
60 | self.session.trust_env = False
61 | self.session.mount('http://', HTTPAdapter(max_retries=myRequests.retries))
62 | self.session.mount('https://', HTTPAdapter(max_retries=myRequests.retries))
63 | rep = self.session.get(const.pan_domain)
64 | soup = BeautifulSoup(rep.text, 'html.parser')
65 | self.post_uri = soup.find('form', {'id': 'diskForm'})['action']
66 |
67 | def get(self, url: str, headers: Union[None, dict] = None, params=None) -> requests.models.Response:
68 | if headers is not None:
69 |             copy_headers = dict(self.session.headers)  # copy so per-call headers do not pollute the session
70 |             for k, v in headers.items():
71 | copy_headers[k] = v
72 | return self.session.get(url, headers=copy_headers, params=params)
73 | return self.session.get(url, params=params)
74 |
75 | def post(self, url: str, headers: Union[None, dict] = None, data=None) -> requests.models.Response:
76 | if headers is not None:
77 |             copy_headers = dict(self.session.headers)  # copy so per-call headers do not pollute the session
78 |             for k, v in headers.items():
79 | copy_headers[k] = v
80 | return self.session.post(url, headers=copy_headers, data=data)
81 | return self.session.post(url, data=data)
82 |
83 |
84 | def select_link(links: list) -> str:
85 | if not os.getenv('auto_select'):
86 | all_domains = list()
87 | for link in links:
88 | link_domain = re.search(const.domain_reg, link).group()
89 | all_domains.append(link_domain)
90 |
91 | if len(all_domains) > 1:
92 |             print('Available download servers:')
93 | for i, domain in enumerate(all_domains):
94 | print(f"[{i}]: {domain}")
95 | while True:
96 |                 choice = input('Select a download server by index: ')
97 | if not choice.isdecimal():
98 |                     print(f'Enter a numeric index between 0 and {len(all_domains) - 1}!')
99 | continue
100 | choice = int(choice)
101 | if 0 <= choice < len(all_domains):
102 | return links[choice]
103 | else:
104 |                     print(f'Enter a valid index between 0 and {len(all_domains) - 1}!')
105 | else:
106 | return links[0]
107 | else:
108 | return random.choice(links)
109 |
110 |
111 | async def jiexi(s: myRequests, url: str) -> dict:
112 | return_data = {'code': 200, 'raw_url': url, 'links': [], 'msg': '', 'cache': 'miss'}
113 | if not url.endswith('#re') and hasRedis:
114 |         # Check whether this link already has cached download addresses
115 | link_cache = r_l.lrange(url, 0, -1)
116 | link_cache = [link.decode('utf-8') for link in link_cache]
117 | if link_cache:
118 | return_data['cache'] = 'hit'
119 | return_data['links'] = link_cache
120 | return return_data
121 | else:
122 | url = url.replace('#re', '')
123 | data = {
124 | 'browser': '',
125 | 'url': url,
126 | 'card': os.environ['card']
127 | }
128 | try:
129 | rep = s.post(f'{const.pan_domain}{s.post_uri}', data=data)
130 | except Exception as e:
131 |         print('Failed to resolve the download link:', e.__class__.__name__)
132 |         return_data['code'] = 500
133 |         return_data['msg'] = 'Failed to resolve the download link'
134 |         return return_data
135 |     if 'toCaptcha' in rep.url:
136 |         print("Hit the resolver's human-verification check")
137 |         return_data['code'] = 403
138 |         return_data['msg'] = 'Human-verification check encountered'
139 |         if platform.system() == 'Windows':
140 |             import pyperclip
141 |             pyperclip.copy(f'{const.pan_domain}/toCaptcha/' + os.environ['card'])
142 |             print('Verification URL copied to the clipboard; the program will exit in 5 seconds')
143 |         else:
144 |             print(f'{const.pan_domain}/toCaptcha/' + os.environ['card'])
145 | return return_data
146 | soup = BeautifulSoup(rep.text, 'html.parser')
147 |     # Expected parse failures are reported on the page itself
148 | error_html = soup.find('div', {'class': 'col text-center'})
149 | if error_html is not None:
150 | error_text = ''
151 | for p in error_html.findAll('p'):
152 | error_text += p.text.strip() + ' '
153 | return_data['code'] = 400
154 | return_data['msg'] = error_text.strip()
155 | return return_data
156 | try:
157 | scriptTags = soup.findAll('a', {'class': 'btn btn-info btn-sm'})
158 | end_time = soup.find('span', {'class': 'badge badge-pill badge-secondary'}).span.text
159 | return_data['end_time'] = end_time
160 | except Exception as e:
161 |         print('Error type:', e.__class__.__name__)
162 |         print('Error detail:', e)
163 | print(soup)
164 | return_data['code'] = 500
165 | return_data['msg'] = e.__class__.__name__
166 | return return_data
167 |
168 |     # Collect the download links
169 | for script in scriptTags:
170 | if script.has_attr('aria2-link'):
171 | return_data['links'].append(script['aria2-link'])
172 |
173 | if len(return_data['links']) == 0:
174 | return_data['code'] = 400
175 |         return_data['msg'] = 'No download links were found'
176 |     else:
177 |         if hasRedis:
178 |             r_l.rpush(url, *return_data['links'])  # redis-py calls are synchronous, so no await
179 |             r_l.expire(url, 60 * 60)  # cache the links for one hour
180 | return return_data
181 |
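# Minimal usage sketch (hypothetical URL; assumes the 'card' env var was set by index.init()):
#   import asyncio
#   s = myRequests()
#   result = asyncio.run(jiexi(s, 'https://example-host.com/file-123.html'))
#   if result['code'] == 200:
#       print(select_link(result['links']))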
--------------------------------------------------------------------------------