├── requirements.txt ├── README.md └── recover.py /requirements.txt: -------------------------------------------------------------------------------- 1 | beautifulsoup4 2 | requests 3 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | # TwitchRecover with Cloudflare bypass 3 | 4 | First of all, I would like to express my gratitude to the original author of https://github.com/tanersb/TwitchRecover for their amazing work. This project builds upon and optimizes their work. 5 | 6 | Usage: 7 | 8 | 1. As a first step, download the zip file and unzip it, open a command prompt at the project directory root, and run ```pip install -r requirements.txt``` to install the required packages. 9 | 10 | 2. Sign up at https://scrapingant.com/ and go to https://app.scrapingant.com/dashboard to copy your API key. 11 | 12 | 3. Open recover.py in a code editor, locate the variable api_key and replace its value with your API key 13 | (e.g., api_key = "YOUR_API_KEY"), then save the file. 14 | 15 | 4. Using a Twitch Tracker or Streams Charts link: 16 | 17 | You can use the Twitch Tracker or Streams Charts link of a stream to directly get the VOD links. 18 | 19 | 20 | e.g. https://twitchtracker.com/blastpremier/streams/46313458365 21 | 22 | 23 | e.g. https://streamscharts.com/channels/blastpremier/streams/46313458365 24 | 25 | 5. Run recover.py and paste the link from Twitch Tracker or Streams Charts as input. 26 | 27 | 28 | 29 | ## How do I use this link? 30 | 31 | 32 | (Recommended) Copy the link into N_m3u8DL-CLI-SimpleG (https://github.com/nilaoda/N_m3u8DL-CLI) to initiate the download. 33 | 34 | or 35 | 36 | Use the VLC media player: 37 | press CTRL + N (open network stream) and paste the link. 
# ------------------------------------------------------------------------------
# /recover.py:
# ------------------------------------------------------------------------------
"""Recover unlisted Twitch VOD playlist URLs.

Given a Twitch Tracker or Streams Charts stream page, the script scrapes the
stream's start time (through the ScrapingAnt API, which bypasses Cloudflare),
reconstructs Twitch's hashed VOD path from streamer name, VOD id and start
timestamp, and probes every known VOD storage domain for a playable
``index-dvr.m3u8`` playlist.
"""

import datetime
import hashlib
import time
import urllib.error
import urllib.parse
import urllib.request
from threading import Thread

# NOTE: ``requests`` and ``bs4`` are imported lazily inside linkTimeCheck()
# so the pure URL/timestamp helpers below can be used without the
# third-party packages installed.

# ScrapingAnt API key -- copy yours from https://app.scrapingant.com/dashboard
api_key = "YOUR_API_KEY"

# Known Twitch VOD storage hosts; each candidate playlist URL is probed on
# every one of them.  (One duplicate entry in the original list was removed.)
domains = [
    "https://vod-secure.twitch.tv",
    "https://vod-metro.twitch.tv",
    "https://vod-pop-secure.twitch.tv",
    "https://d2e2de1etea730.cloudfront.net",
    "https://dqrpb9wgowsf5.cloudfront.net",
    "https://ds0h3roq6wcgc.cloudfront.net",
    "https://d2nvs31859zcd8.cloudfront.net",
    "https://d2aba1wr3818hz.cloudfront.net",
    "https://d3c27h4odz752x.cloudfront.net",
    "https://dgeft87wbj63p.cloudfront.net",
    "https://d1m7jfoe9zdc1j.cloudfront.net",
    "https://d2vjef5jvl6bfs.cloudfront.net",
    "https://d1ymi26ma8va5x.cloudfront.net",
    "https://d1mhjrowxxagfy.cloudfront.net",
    "https://ddacn6pr5v0tl.cloudfront.net",
    "https://d3aqoihi2n8ty8.cloudfront.net",
    "https://d1xhnb4ptk05mw.cloudfront.net",
    "https://d6tizftlrpuof.cloudfront.net",
    "https://d36nr0u3xmc4mm.cloudfront.net",
    "https://d1oca24q5dwo6d.cloudfront.net",
    "https://d2um2qdswy1tb0.cloudfront.net",
    "https://d1w2poirtb3as9.cloudfront.net",
    "https://d6d4ismr40iw.cloudfront.net",
    "https://d1g1f25tn8m2e6.cloudfront.net",
    "https://dykkng5hnh52u.cloudfront.net",
    "https://d2dylwb3shzel1.cloudfront.net",
    "https://d2xmjdvx03ij56.cloudfront.net",
    "https://d3vd9lfkzbru3h.cloudfront.net",
]

# Flipped to 1 as soon as any working playlist URL has been found.
find1c = 0

# Month-name abbreviations as used by Streams Charts; unknown names map to 0.
_MONTHS = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
           'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}


def linkChecker(link):
    """Identify the tracker site and pull streamer name + VOD id from *link*.

    Sets the module globals ``streamername`` and ``vodID`` as a side effect.

    Returns:
        1 / 3 -- Twitch Tracker link (with / without ``https://`` scheme)
        2 / 4 -- Streams Charts link (with / without ``https://`` scheme)
        0     -- unsupported or malformed link
    """
    global streamername, vodID
    parts = link.split('/')
    # With a scheme ("https://host/...") the host lands at index 2; without
    # one it is at index 0.  Length guards avoid IndexError on short input
    # (the original crashed on e.g. a bare word).
    if len(parts) > 5 and parts[2] == 'twitchtracker.com':
        streamername, vodID = parts[3], parts[5]
        return 1
    if len(parts) > 6 and parts[2] == 'streamscharts.com':
        streamername, vodID = parts[4], parts[6]
        return 2
    if len(parts) > 3 and parts[0] == 'twitchtracker.com':
        streamername, vodID = parts[1], parts[3]
        return 3
    if len(parts) > 4 and parts[0] == 'streamscharts.com':
        streamername, vodID = parts[2], parts[4]
        return 4
    print('Check the link again. (An unsupported link has been entered or the link has an error.)')
    return 0


def linkTimeCheck(link):
    """Scrape the stream's start date/time from the tracker page.

    Returns a ``"year-month-day-hour-minute-second"`` string.  For Streams
    Charts pages the second is unknown, so it is encoded as 60 and find()
    brute-forces all 60 possible seconds.  Returns 0 for an unsupported
    link and None when scraping fails.
    """
    import requests                    # lazy: see module-level note
    from bs4 import BeautifulSoup      # lazy: see module-level note

    site = linkChecker(link)  # call once; also sets streamername / vodID
    encoded_link = urllib.parse.quote(link, safe='')
    api_url = f"https://api.scrapingant.com/v2/general?url={encoded_link}&x-api-key={api_key}"

    if site in (2, 4):  # Streams Charts
        print('Date and Time are checking..')
        r = requests.get(api_url)
        soup = BeautifulSoup(r.content, 'html.parser')
        matches = soup.find_all('time', 'ml-2 font-bold')
        try:
            clock = matches[0].text
        except IndexError:
            # No <time> element in the response: almost always a bad or
            # missing API key.
            print('It seems you did not copy and paste your API key; please paste it and try again.')
            return None

        clock = clock.replace('\n', '').replace(',', '')
        print(f'Clock data: {clock}')
        print(f'Streamer name: {streamername} \nvodID: {vodID}')

        # Expected shape: "<day> <MonthAbbr> <year> <hh:mm>"
        day_s, month_s, year_s, hm = clock.split(' ')[:4]
        hour_s, minute_s = hm.split(':')[:2]
        # Second is unknown on Streams Charts -> encode as 60.
        timestamp = (f"{int(year_s)}-{_MONTHS.get(month_s, 0)}-{int(day_s)}"
                     f"-{int(hour_s)}-{int(minute_s)}-60")
        print('timestamp', timestamp)
        return timestamp

    if site in (1, 3):  # Twitch Tracker
        print('Date and Time are checking...')
        header = {
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.50'
        }
        r = requests.get(api_url, headers=header)
        soup = BeautifulSoup(r.content, 'html.parser')
        meta_element = soup.find("meta", attrs={"name": "description"})
        if meta_element is None:
            # The original printed a warning and then re-read the missing
            # element, re-raising the AttributeError; bail out instead.
            print('It seems you did not copy and paste your API key; please paste it and try again.')
            return None
        content = meta_element.get("content")
        # Description looks like "... on <date time> - ..."; keep the part
        # after "on " and before the first " - ".
        first_time = content.split(" - ")[0].split("on ")[1]
        timestamp = first_time.replace(" ", "-").replace(":", "-")
        print('timestamp', timestamp)
        return timestamp

    if site == 0:
        print('You entered an unsupported link.')
        return 0
    print('An unknown error has occurred.')
    return None


def totimestamp(dt, epoch=datetime.datetime(1970, 1, 1)):
    """Return *dt* as seconds since *epoch* (float), treating *dt* as UTC."""
    return (dt - epoch).total_seconds()


def find(timestamp, domain):
    """Probe *domain* for a playable VOD playlist of the current stream.

    *timestamp* is the "Y-M-D-H-M-S" string from linkTimeCheck(); a second
    value of 60 (or a missing seconds field) means "unknown", in which case
    all 60 possible seconds are tried, one probe thread each.  Prints every
    URL that answers and sets the global ``find1c`` flag to 1.
    """
    fields = [int(p) for p in timestamp.split('-')]
    year, month, day, hour, minute = fields[:5]
    # Twitch Tracker timestamps may omit the seconds field; treat that the
    # same as the explicit "unknown second" marker (60).
    second = fields[5] if len(fields) > 5 else 60

    def candidate_url(sec):
        # Twitch VOD path: first 20 hex chars of
        # sha1("<name>_<vod>_<unixtime>") prepended to that same string.
        epoch_seconds = int(totimestamp(datetime.datetime(year, month, day, hour, minute, sec)))
        base = f"{streamername}_{vodID}_{epoch_seconds}"
        digest = hashlib.sha1(base.encode('utf-8')).hexdigest()[:20]
        return f"{domain}/{digest}_{base}/chunked/index-dvr.m3u8"

    def check(url):
        global find1c
        try:
            urllib.request.urlopen(url, timeout=10)
        except OSError:
            # Covers HTTPError, URLError and socket timeouts.  The original
            # caught only HTTPError, so a dead CDN host killed the worker
            # thread with a traceback.
            pass
        else:
            print(url)
            find1c = 1

    seconds_to_try = range(60) if second == 60 else (second,)
    threads = [Thread(target=check, args=(candidate_url(s),)) for s in seconds_to_try]
    for t in threads:
        t.start()
    for t in threads:
        t.join()


def main():
    """Interactive entry point: ask for a tracker link, then probe every domain."""
    print('Find the broadcast link you want from Twitchtracker or Streamscharts site.')
    link = input('Enter the link:')

    timestamp = linkTimeCheck(link)
    # 0 (unsupported link) and None (scrape failure) both mean we cannot
    # continue; the original quit only on None and crashed on 0 by passing
    # it to find().
    if not timestamp:
        return

    for domain in domains:
        if find1c:
            break  # a working URL was already printed; stop probing
        find(timestamp, domain)

    if find1c == 0:
        print('No File Found on Twitch Servers.')
    else:
        # Keep the console window open long enough to copy the URL when the
        # script was launched by double-click.
        time.sleep(10)


if __name__ == '__main__':
    main()