├── requirements.txt
├── README.md
├── config.json
├── download_from_file.py
├── LICENSE.txt
├── main.py
├── info
│   └── kuaishou.py
└── core
    └── kuaishou.py

/requirements.txt:
--------------------------------------------------------------------------------
requests==2.22.0
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ButterAndButterfly/T-VideoDownloader/HEAD/README.md
--------------------------------------------------------------------------------
/config.json:
--------------------------------------------------------------------------------
{
    "cookie": "clientid=x; did=xx; client_key=xxx; ...",
    "saveFolder": "D:\\Downloads\\{user_name}",
    "fileName": "{video_id}.mp4",
    "tasks": [
        "lolxingchen"
    ]
}
--------------------------------------------------------------------------------
/download_from_file.py:
--------------------------------------------------------------------------------
from core.kuaishou import Kuaishou
import json

if __name__ == '__main__':
    '''
    A small fallback for when main.py errors out: downloads specific videos of the user given by user_id.
    user_id (required),
    user_name (not needed if the custom file name does not use it),
    file.txt (one video_id per line; main.py prints them, trim the list as needed)
    '''
    user_id = ''
    user_name = ''
    # Read the config; file.txt holds one video id per line
    with open(r'config.json', "r", encoding='utf-8') as file:
        content = file.read()
    config = json.loads(content)

    downloader = Kuaishou(
        cookie=config['cookie'],
        user_id=user_id,
    )
    with open(r'file.txt', "r") as file:

        line = file.readline().strip('\r\n')
        while line:
            video = {
                'user_name': user_name,
                'user_id': user_id,
                'video_id': line,
            }
            # Download folder
            folder = config['saveFolder']\
                .replace('{user_name}', video['user_name'])\
                .replace('{user_id}', video['user_id'])
            # Download file name
            fileName = None
            if 'fileName' in config:
                fileName = config['fileName']\
                    .replace('{user_name}', video['user_name'])\
                    .replace('{user_id}', video['user_id'])\
                    .replace('{video_id}', video['video_id'])

            url = downloader.getUrl(video['user_id'], video['video_id'])
            print('Download URL: %s' % url)
            downloader.download(url, folder=folder, fileName=fileName)
            line = file.readline().strip('\r\n')
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
The Star And Thank Author License (SATA)

Copyright © 2020 NiceLee

Project Url: https://github.com/ButterAndButterfly/T-VideoDownloader

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

And wait, the most important, you shall star/+1/like the project(s) in project url
section above first, and then thank the author(s) in Copyright section.
Here are some suggested ways:

- Email the authors a thank-you letter, and make friends with him/her/them.
- Report bugs or issues.
- Tell friends what a wonderful project this is.
- And, sure, you can just express thanks in your mind without telling the world.

Contributors of this project by forking have the option to add his/her name and
forked project url at copyright and project url sections, but shall not delete
or modify anything else in these two sections.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
from core.kuaishou import Kuaishou
import json
import time

if __name__ == '__main__':
    # Read the config
    with open(r'config.json', "r", encoding='utf-8') as file:
        content = file.read()
        print(content)
    config = json.loads(content)

    for task in config['tasks']:
        # Resolve the time range (per-task value first, then the global config)
        if 'time_min' in task:
            time_min = task['time_min']
        elif 'time_min' in config:
            time_min = config['time_min']
        else:
            time_min = None

        if 'time_max' in task:
            time_max = task['time_max']
        elif 'time_max' in config:
            time_max = config['time_max']
        else:
            time_max = None
        # Resolve the task's user id (a task may be a plain id string or a dict)
        if 'user_id' in task:
            user_id = task['user_id']
        else:
            user_id = task

        print(time_max)
        downloader = Kuaishou(
            cookie=config['cookie'],
            user_id=user_id,
            time_min=time_min,
            time_max=time_max,
        )
        videos = downloader.getVideos()
        for index, video in enumerate(videos):
            print(video['video_id'])

        print('Downloading videos of user id %s, %d in total' % (user_id, len(videos)))
        for index, video in enumerate(videos):
            # Download folder
            folder = config['saveFolder']\
                .replace('{user_name}', video['user_name'])\
                .replace('{user_id}', video['user_id'])\
                .replace('{caption}', video['caption'])
            # Download file name
            fileName = None
            if 'fileName' in config:
                fileName = config['fileName']\
                    .replace('{user_name}', video['user_name'])\
                    .replace('{user_id}', video['user_id'])\
                    .replace('{video_id}', video['video_id'])\
                    .replace('{caption}', video['caption'])

            print('Progress: %d/%d' % (index + 1, len(videos)))
            dtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(video['timestamp']/1000.0))
            print('Published: %s' % dtime)
            url = downloader.getUrl(video['user_id'], video['video_id'])
            print('Download URL: %s' % url)
            downloader.download(url, folder=folder, fileName=fileName)
            print('%s downloaded' % video['caption'])
--------------------------------------------------------------------------------
/info/kuaishou.py:
--------------------------------------------------------------------------------
# coding=utf-8
import requests
import csv

class Kuaishou:

    def __init__(self, cookie):
        self.cookie = cookie
        self.follows = []
        self.headers = {
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',
            'Content-Type': 'application/json',
            'Origin': 'https://live.kuaishou.com',
            'Host': 'live.kuaishou.com',
            'Connection': 'keep-alive',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache',
            'Referer': 'https://live.kuaishou.com/cate/my-follow/all',
            'Cookie': self.cookie
        }


    def getFollows(self):
        # Page through the followed-users list via the m_graphql endpoint
        url = "https://live.kuaishou.com/m_graphql"
        pcursor = ""
        while pcursor != 'no_more':
            param = '{"operationName":"FollowQuery","variables":{"count":100,"pcursor":"%s"},"query":\
                "query FollowQuery($pcursor: String, $count: Int) {\\n allFollows(pcursor: $pcursor, count: $count)\
                {\\n list {\\n id\\n name\\n living\\n avatar\\n sex\\n description\\n\
                counts {\\n fan\\n follow\\n photo\\n __typename\\n }\\n\
                __typename\\n }\\n pcursor\\n __typename\\n }\\n}\\n"}' % (pcursor)
            data = requests.post(url, timeout=10, headers=self.headers, data=param)
            data = data.json()['data']['allFollows']
            pcursor = data['pcursor']
            for user in data['list']:
                user = {
                    'id': user['id'],
                    'name': user['name'],
                    'sex': user['sex'],
                    'avatar': user['avatar'],
                    'description': user['description'],
                }
                self.follows.append(user)

        return self.follows


if __name__ == '__main__':
    api = Kuaishou(
        cookie='',  # paste your live.kuaishou.com cookie here
    )
    follows = api.getFollows()
    print('Following %d users in total' % len(follows))
    with open('data.csv', 'w', newline='', encoding='utf-8-sig') as fp:
        writer = csv.writer(fp)
        for index, user in enumerate(follows):
            print('%s\t%s\thttps://live.kuaishou.com/profile/%s' % (user['id'], user['name'], user['id']))
            writer.writerow([user['id'], user['name'], 'https://live.kuaishou.com/profile/%s' % user['id']])
--------------------------------------------------------------------------------
/core/kuaishou.py:
--------------------------------------------------------------------------------
# coding=utf-8
import requests
import time
import os
import re

class Kuaishou:

    def __init__(self, cookie, user_id=None, time_min=None, time_max=None):
        self.cookie = cookie
        self.user_id = user_id
        self.time_min = time_min
        self.time_max = time_max
        self.videos = []
        self.downloaded = 0
        self.headers = {
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',
            'Content-Type': 'application/json',
            'Origin': 'https://live.kuaishou.com',
            'Host': 'live.kuaishou.com',
            'Connection': 'keep-alive',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache',
            'Referer': 'https://live.kuaishou.com/profile/%s' % self.user_id,
            'Cookie': self.cookie
        }
        self.noWaterMarkHeaders = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
            "Accept-Encoding": "gzip, deflate, br",
            "Host": "kphbeijing.m.chenzhongtech.com",
            'Connection': 'keep-alive',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache',
            "User-Agent": "Mozilla/5.0 (Android 9.0; Mobile; rv:68.0) Gecko/68.0 Firefox/68.0",
            'Cookie': self.cookie
        }
        self.session = requests.Session()
        self.session.headers.update(self.noWaterMarkHeaders)

    def download(self, url, fileName=None, folder=None):
        # Derive a default file name from the URL when none is given (or when '{default}' is used)
        if (not fileName) or fileName.find('{default}') > -1:
            end = url.find('?')
            if end > -1:
                default = url[url.rfind('/'):end]
            else:
                default = url[url.rfind('/'):]
            if fileName:
                fileName = fileName.replace('{default}', default)
            else:
                fileName = default

        if not folder:
            folder = 'download'

        path = r'%s/%s' % (folder, fileName)
        if not os.path.exists(path):  # skip files that have already been downloaded
            if not os.path.exists(folder):
                os.makedirs(folder)

            with open(path, "wb") as file:
                response = requests.get(url, stream=True, timeout=120)
                for data in response.iter_content(chunk_size=1024 * 1024):
                    file.write(data)
                    self.downloaded += len(data)
                response.close()
            time.sleep(2)
        else:
            time.sleep(5)

    def getUrl(self, user_id, video_id):
        # url = "https://live.kuaishou.com/m_graphql"
        # param = '{"operationName":"SharePageQuery","variables":{"photoId":"%s",\
        #     "principalId":"%s"},"query":"query SharePageQuery($principalId: String, $photoId: String)\
        #     {\\n feedById(principalId: $principalId, photoId: $photoId) {\\n currentWork {\\n playUrl\\n\
        #     __typename\\n }\\n __typename\\n }\\n}\\n"}' % (video_id, user_id)
        # data = requests.post(url, timeout=30, headers=self.headers, data=param)
        # data = data.json()['data']
        # Note: this call fails easily; the endpoint apparently rate-limits requests
        # print(video_id)
        # url = data['feedById']['currentWork']['playUrl']
        # return url
        # Watermark-free version; if it fails, fall back to the watermarked (GraphQL) version above
        url = "https://kphbeijing.m.chenzhongtech.com/fw/photo/%s" % video_id
        res = self.session.get(url, timeout=30)
        print(video_id)
        searchObj = re.search(r'srcNoMark":"(http[^"]*)', res.text)
        url = searchObj.group(1)
        return url

    def getVideos(self):
        # First page comes from privateFeedsQuery; later pages are fetched with publicFeedsQuery below
        url = "https://live.kuaishou.com/m_graphql"
        param = "{\"operationName\":\"privateFeedsQuery\",\"variables\":{\"principalId\":\"%s\",\
            \"pcursor\":\"\",\"count\":24},\"query\":\"query privateFeedsQuery($principalId: String, \
            $pcursor: String, $count: Int) {\\n privateFeeds(principalId: $principalId, pcursor: $pcursor,\
            count: $count) {\\n pcursor\\n list {\\n id\\n thumbnailUrl\\n poster\\n\
            workType\\n type\\n useVideoPlayer\\n imgUrls\\n imgSizes\\n magicFace\\n\
            musicName\\n caption\\n location\\n liked\\n onlyFollowerCanComment\\n\
            relativeHeight\\n timestamp\\n width\\n height\\n counts {\\n displayView\\n\
            displayLike\\n displayComment\\n __typename\\n }\\n\
            user {\\n id\\n eid\\n name\\n avatar\\n __typename\\n }\\n\
            expTag\\n __typename\\n }\\n __typename\\n }\\n}\\n\"}" % self.user_id
        data = requests.post(url, timeout=10, headers=self.headers, data=param)
        # print(data.text)
        data = data.json()['data']['privateFeeds']
        pcursor = data['pcursor']
        is2Break = False
        time_min = None
        time_max = None
        if self.time_min:
            time_min = time.mktime(time.strptime(self.time_min, "%Y-%m-%d")) * 1000
        if self.time_max:
            time_max = time.mktime(time.strptime(self.time_max, "%Y-%m-%d")) * 1000
        for obj in data['list']:
            if not obj['id']:
                continue
            video = {
                'user_id': self.user_id,
                'user_name': obj['user']['name'],
                'video_id': obj['id'],
                'workType': obj['workType'],
                'caption': obj['caption'],
                'timestamp': obj['timestamp'],
            }
            if time_min and video['timestamp'] < time_min:
                is2Break = True
                break
            if time_max is None or video['timestamp'] < time_max + 1000*60*60*24:
                if video['workType'] == 'video':
                    self.videos.append(video)

        while (not is2Break) and pcursor != 'no_more':
            if not pcursor:
                # An empty cursor at this point usually means the cookie has expired
                print('cookie has expired')
                return self.videos
            param = "{\"operationName\":\"publicFeedsQuery\",\"variables\":{\"principalId\":\"%s\",\
                \"pcursor\":\"%s\",\"count\":24},\"query\":\"query publicFeedsQuery($principalId: String,\
                $pcursor: String, $count: Int) {\\n publicFeeds(principalId: $principalId, pcursor: $pcursor, count: $count)\
                {\\n pcursor\\n live {\\n user {\\n id\\n avatar\\n name\\n __typename\\n\
                }\\n watchingCount\\n poster\\n coverUrl\\n caption\\n id\\n playUrls {\\n\
                quality\\n url\\n __typename\\n }\\n quality\\n gameInfo {\\n category\\n\
                name\\n pubgSurvival\\n type\\n kingHero\\n __typename\\n }\\n hasRedPack\\n\
                liveGuess\\n expTag\\n __typename\\n }\\n list {\\n id\\n thumbnailUrl\\n poster\\n\
                workType\\n type\\n useVideoPlayer\\n imgUrls\\n imgSizes\\n magicFace\\n musicName\\n\
                caption\\n location\\n liked\\n onlyFollowerCanComment\\n relativeHeight\\n timestamp\\n\
                width\\n height\\n counts {\\n displayView\\n displayLike\\n displayComment\\n\
                __typename\\n }\\n user {\\n id\\n eid\\n name\\n avatar\\n __typename\\n\
                }\\n expTag\\n __typename\\n }\\n __typename\\n }\\n}\\n\"}" % (self.user_id, pcursor)
            data = requests.post(url, timeout=10, headers=self.headers, data=param).json()['data']['publicFeeds']
            pcursor = data['pcursor']
            is2Break = False
            for obj in data['list']:
                video = {
                    'user_id': self.user_id,
                    'user_name': obj['user']['name'],
                    'video_id': obj['id'],
                    'workType': obj['workType'],
                    'caption': obj['caption'],
                    'timestamp': obj['timestamp'],
                }
                if time_min and video['timestamp'] < time_min:
                    is2Break = True
                    break
                if time_max is None or video['timestamp'] < time_max + 1000*60*60*24:
                    if video['workType'] == 'video':
                        self.videos.append(video)

        return self.videos

    def refreshCookie(self):
        '''
        TODO: refresh the login cookie (not implemented yet)
        '''
        url = "https://id.kuaishou.com/pass/kuaishou/login/passToken"
        headers = {
            'Host': 'id.kuaishou.com',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:75.0) Gecko/20100101 Firefox/75.0',
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Origin': 'https://live.kuaishou.com',
            'Referer': 'https://live.kuaishou.com/cate/my-follow/living',
            'Connection': 'keep-alive',
            'Pragma': 'no-cache',
            'Cache-Control': 'no-cache',
            'Cookie': self.cookie
        }
        params = 'sid=kuaishou.live.web'
        data = requests.post(url, timeout=10, headers=headers, data=params).text
        print(data)
--------------------------------------------------------------------------------
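For reference, below is a minimal usage sketch (not one of the repository files) showing how config.json ties into the Kuaishou class from core/kuaishou.py. It mirrors what main.py does for a single task; the script name run_one_task.py is illustrative only, and it assumes config.json has been filled in with a valid cookie.

# run_one_task.py -- illustrative sketch, not part of the repository
import json
from core.kuaishou import Kuaishou

if __name__ == '__main__':
    with open('config.json', 'r', encoding='utf-8') as fp:
        config = json.load(fp)

    user_id = config['tasks'][0]  # a task entry may simply be the bare user id
    downloader = Kuaishou(cookie=config['cookie'], user_id=user_id)
    for video in downloader.getVideos():
        # Fill the placeholders used by the sample config.json
        folder = config['saveFolder']\
            .replace('{user_name}', video['user_name'])\
            .replace('{user_id}', video['user_id'])
        fileName = config.get('fileName', '{default}')\
            .replace('{video_id}', video['video_id'])
        url = downloader.getUrl(video['user_id'], video['video_id'])
        downloader.download(url, folder=folder, fileName=fileName)

Running main.py itself does the same for every entry in tasks and additionally applies the optional time_min/time_max filters.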