├── README.md
├── gplinks_bypass.py
└── requirements.txt

/README.md:
--------------------------------------------------------------------------------
# gplinks-bypass
A gplinks.co short-link bypasser.
--------------------------------------------------------------------------------
/gplinks_bypass.py:
--------------------------------------------------------------------------------
import time

import cloudscraper
from bs4 import BeautifulSoup

# Example short link, e.g. https://gplinks.co/XXXX
url = "https://gplinks.co/Z94r6"


def gplinks_bypass(url: str) -> str:
    """Resolve a gplinks.co short link and return the destination URL."""
    client = cloudscraper.create_scraper(allow_brotli=False)
    domain = "https://gplinks.co/"
    referer = "https://mynewsmedia.co/"

    # The first request redirects; the redirect target carries the view id.
    vid = client.get(url, allow_redirects=False).headers["Location"].split("=")[-1]
    url = f"{url}/?{vid}"

    # Fetch the interstitial page (sending the blog referer it expects) and
    # collect the hidden fields of the #go-link form.
    response = client.get(url, allow_redirects=False, headers={"referer": referer})
    soup = BeautifulSoup(response.content, "html.parser")
    inputs = soup.find(id="go-link").find_all(name="input")
    data = {inp.get("name"): inp.get("value") for inp in inputs}

    # The site enforces a countdown before the form may be submitted.
    time.sleep(10)
    headers = {"x-requested-with": "XMLHttpRequest"}
    bypassed_url = client.post(domain + "links/go", data=data, headers=headers).json()["url"]
    return bypassed_url


if __name__ == "__main__":
    print(gplinks_bypass(url))
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
cloudscraper==1.2.58
beautifulsoup4==4.11.1
--------------------------------------------------------------------------------
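
Usage note: the sketch below is not part of the repository. It is a minimal example assuming gplinks_bypass.py is importable from the working directory and that the module-level print call only runs under the __main__ guard; the link shown is the placeholder from the script's own comment, not a working URL.

    from gplinks_bypass import gplinks_bypass

    # Resolve a gplinks.co short link and print the destination URL.
    destination = gplinks_bypass("https://gplinks.co/XXXX")
    print(destination)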