├── runtime.txt ├── Procfile ├── requirements.txt ├── Dockerfile ├── config.json ├── app.py ├── README.md ├── texts.py ├── main.py ├── ddl.py └── bypasser.py /runtime.txt: -------------------------------------------------------------------------------- 1 | python-3.9.14 2 | -------------------------------------------------------------------------------- /Procfile: -------------------------------------------------------------------------------- 1 | worker: python3 main.py 2 | web: python3 app.py -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | requests 2 | cloudscraper 3 | bs4 4 | python-dotenv 5 | pyrogram 6 | tgcrypto 7 | lxml 8 | cfscrape 9 | urllib3==1.26 10 | flask==2.0.1 -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.9 2 | 3 | WORKDIR /app 4 | 5 | COPY requirements.txt /app/ 6 | RUN pip3 install -r requirements.txt 7 | COPY . /app 8 | 9 | CMD flask run -h 0.0.0.0 -p 10000 & python3 main.py -------------------------------------------------------------------------------- /config.json: -------------------------------------------------------------------------------- 1 | { 2 | "TOKEN": "", 3 | "ID": "", 4 | "HASH": "", 5 | "Laravel_Session": "", 6 | "XSRF_TOKEN": "", 7 | "GDTot_Crypt": "", 8 | "DCRYPT": "", 9 | "KCRYPT": "", 10 | "HCRYPT": "", 11 | "KATCRYPT": "", 12 | "UPTOBOX_TOKEN":"", 13 | "TERA_COOKIE":"", 14 | "CLOUDFLARE":"" 15 | } 16 | -------------------------------------------------------------------------------- /app.py: -------------------------------------------------------------------------------- 1 | import os 2 | from flask import Flask 3 | 4 | app = Flask(__name__) 5 | 6 | @app.route('/') 7 | def home(): 8 | return """ 9 |
10 | 11 |
12 | """ 17 | 18 | if __name__ == "__main__": 19 | port = int(os.environ.get("PORT", 5000)) 20 | app.run(host='0.0.0.0', port=port) -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Link-Bypasser-Bot 2 | 3 | a Telegram Bot that can Bypass Ad Links and Generate Direct Links. see the Bot in Action [@BypassLinkBot](https://t.me/BypassLinkBot) or try it on [Replit](https://replit.com/@bipinkrish/Link-Bypasser#app.py) 4 | 5 | --- 6 | 7 | ## Required Variables 8 | 9 | - `TOKEN` Bot Token from @BotFather 10 | - `HASH` API Hash from my.telegram.org 11 | - `ID` API ID from my.telegram.org 12 | 13 | ## Optional Variables 14 | you can also set these in `config.json` file 15 | 16 | - `CRYPT` GDTot Crypt If you don't know how to get Crypt then [Learn Here](https://www.youtube.com/watch?v=EfZ29CotRSU) 17 | - `XSRF_TOKEN` and `Laravel_Session` XSRF Token and Laravel Session cookies! If you don't know how to get then then watch [this Video](https://www.youtube.com/watch?v=EfZ29CotRSU) (for GDTOT) and do the same for sharer.pw 18 | - `DRIVEFIRE_CRYPT` Drivefire Crypt 19 | - `KOLOP_CRYPT` Kolop Crypt! 20 | - `HUBDRIVE_CRYPT` Hubdrive Crypt 21 | - `KATDRIVE_CRYPT` Katdrive Crypt 22 | - `UPTOBOX_TOKEN` Uptobox Token 23 | - `TERA_COOKIE` Terabox Cookie (only `ndus` value) (see [Help](#help)) 24 | - `CLOUDFLARE` Use `cf_clearance` cookie from and Cloudflare protected sites 25 | 26 | --- 27 | 28 | ## Commands 29 | 30 | ``` 31 | start - Welcome Message 32 | help - List of All Supported Sites 33 | ``` 34 | 35 | --- 36 | 37 | ## Supported Sites 38 | 39 | To see the list of supported sites see [texts.py](https://github.com/bipinkrish/Link-Bypasser-Bot/blob/main/texts.py) file 40 | 41 | --- 42 | 43 | ## Help 44 | 45 | * If you are deploying on VPS, watch videos on how to set/export Environment Variables. OR you can set these in `config.json` file 46 | 47 | * Terabox Cookie 48 | 49 | 1. Open any Browser 50 | 2. Make sure you are logged in with a Terbox account 51 | 3. Press `f12` to open DEV tools and click Network tab 52 | 4. Open any Terbox video link and open Cookies tab 53 | 5. Copy value of `ndus` 54 | 55 |
56 | 57 | ![](https://i.ibb.co/hHBZM5m/Screenshot-113.png) 58 | -------------------------------------------------------------------------------- /texts.py: -------------------------------------------------------------------------------- 1 | gdrivetext = """__- appdrive \n\ 2 | - driveapp \n\ 3 | - drivehub \n\ 4 | - gdflix \n\ 5 | - drivesharer \n\ 6 | - drivebit \n\ 7 | - drivelinks \n\ 8 | - driveace \n\ 9 | - drivepro \n\ 10 | - driveseed \n\ 11 | __""" 12 | 13 | 14 | otherstext = """__- exe, exey \n\ 15 | - sub2unlock, sub2unlock \n\ 16 | - rekonise \n\ 17 | - letsboost \n\ 18 | - phapps2app \n\ 19 | - mboost \n\ 20 | - sub4unlock \n\ 21 | - ytsubme \n\ 22 | - bitly \n\ 23 | - social-unlock \n\ 24 | - boost \n\ 25 | - gooly \n\ 26 | - shrto \n\ 27 | - tinyurl 28 | __""" 29 | 30 | 31 | ddltext = """__- yandex \n\ 32 | - mediafire \n\ 33 | - uptobox \n\ 34 | - osdn \n\ 35 | - github \n\ 36 | - hxfile \n\ 37 | - 1drv (onedrive) \n\ 38 | - pixeldrain \n\ 39 | - antfiles \n\ 40 | - streamtape \n\ 41 | - racaty \n\ 42 | - 1fichier \n\ 43 | - solidfiles \n\ 44 | - krakenfiles \n\ 45 | - upload \n\ 46 | - akmfiles \n\ 47 | - linkbox \n\ 48 | - shrdsk \n\ 49 | - letsupload \n\ 50 | - zippyshare \n\ 51 | - wetransfer \n\ 52 | - terabox, teraboxapp, 4funbox, mirrobox, nephobox, momerybox \n\ 53 | - filepress \n\ 54 | - anonfiles, hotfile, bayfiles, megaupload, letsupload, filechan, myfile, vshare, rapidshare, lolabits, openload, share-online, upvid \n\ 55 | - fembed, fembed, femax20, fcdn, feurl, layarkacaxxi, naniplay, nanime, naniplay, mm9842 \n\ 56 | - sbembed, watchsb, streamsb, sbplay. 57 | __""" 58 | 59 | 60 | shortnertext = """__- igg-games \n\ 61 | - olamovies\n\ 62 | - katdrive \n\ 63 | - drivefire\n\ 64 | - kolop \n\ 65 | - hubdrive \n\ 66 | - filecrypt \n\ 67 | - shareus \n\ 68 | - shortingly \n\ 69 | - gyanilinks \n\ 70 | - shorte \n\ 71 | - psa \n\ 72 | - sharer \n\ 73 | - new1.gdtot \n\ 74 | - adfly\n\ 75 | - gplinks\n\ 76 | - droplink \n\ 77 | - linkvertise \n\ 78 | - rocklinks \n\ 79 | - ouo \n\ 80 | - try2link \n\ 81 | - htpmovies \n\ 82 | - sharespark \n\ 83 | - cinevood\n\ 84 | - atishmkv \n\ 85 | - urlsopen \n\ 86 | - xpshort, techymozo \n\ 87 | - dulink \n\ 88 | - ez4short \n\ 89 | - krownlinks \n\ 90 | - teluguflix \n\ 91 | - taemovies \n\ 92 | - toonworld4all \n\ 93 | - animeremux \n\ 94 | - adrinolinks \n\ 95 | - tnlink \n\ 96 | - flashlink \n\ 97 | - short2url \n\ 98 | - tinyfy \n\ 99 | - mdiskshortners \n\ 100 | - earnl \n\ 101 | - moneykamalo \n\ 102 | - easysky \n\ 103 | - indiurl \n\ 104 | - linkbnao \n\ 105 | - mdiskpro \n\ 106 | - tnshort \n\ 107 | - indianshortner \n\ 108 | - rslinks \n\ 109 | - bitly, tinyurl \n\ 110 | - thinfi \n\ 111 | - pdisk \n\ 112 | - vnshortener \n\ 113 | - onepagelink \n\ 114 | __""" 115 | 116 | 117 | HELP_TEXT = f'**--Just Send me any Supported Links From Below Mentioned Sites--** \n\n\ 118 | **List of Sites for DDL : ** \n\n{ddltext} \n\ 119 | **List of Sites for Shortners : ** \n\n{shortnertext} \n\ 120 | **List of Sites for GDrive Look-ALike : ** \n\n{gdrivetext} \n\ 121 | **Other Supported Sites : ** \n\n{otherstext}' 122 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import pyrogram 2 | from pyrogram import Client,filters 3 | from pyrogram.types import InlineKeyboardMarkup,InlineKeyboardButton 4 | from os import environ, remove 5 | from threading import Thread 6 | from json import load 7 | from re import 
search 8 | 9 | from texts import HELP_TEXT 10 | import bypasser 11 | from ddl import ddllist, direct_link_generator 12 | from time import time 13 | 14 | 15 | # bot 16 | with open('config.json', 'r') as f: DATA = load(f) 17 | def getenv(var): return environ.get(var) or DATA.get(var, None) 18 | 19 | bot_token = getenv("TOKEN") 20 | api_hash = getenv("HASH") 21 | api_id = getenv("ID") 22 | app = Client("my_bot",api_id=api_id, api_hash=api_hash,bot_token=bot_token) 23 | 24 | 25 | # handle index 26 | def handleIndex(ele,message,msg): 27 | result = bypasser.scrapeIndex(ele) 28 | try: app.delete_messages(message.chat.id, msg.id) 29 | except: pass 30 | for page in result: app.send_message(message.chat.id, page, reply_to_message_id=message.id, disable_web_page_preview=True) 31 | 32 | 33 | # loop thread 34 | def loopthread(message,otherss=False): 35 | 36 | urls = [] 37 | if otherss: texts = message.caption 38 | else: texts = message.text 39 | 40 | if texts in [None,""]: return 41 | for ele in texts.split(): 42 | if "http://" in ele or "https://" in ele: 43 | urls.append(ele) 44 | if len(urls) == 0: return 45 | 46 | if bypasser.ispresent(ddllist,urls[0]): 47 | msg = app.send_message(message.chat.id, "⚡ __generating...__", reply_to_message_id=message.id) 48 | else: 49 | if "https://olamovies" in urls[0] or "https://psa.wf/" in urls[0]: 50 | msg = app.send_message(message.chat.id, "🔎 __this might take some time...__", reply_to_message_id=message.id) 51 | else: 52 | msg = app.send_message(message.chat.id, "🔎 __bypassing...__", reply_to_message_id=message.id) 53 | 54 | strt = time() 55 | links = "" 56 | for ele in urls: 57 | if search(r"https?:\/\/(?:[\w.-]+)?\.\w+\/\d+:", ele): 58 | handleIndex(ele,message,msg) 59 | return 60 | elif bypasser.ispresent(ddllist,ele): 61 | try: temp = direct_link_generator(ele) 62 | except Exception as e: temp = "**Error**: " + str(e) 63 | else: 64 | try: temp = bypasser.shortners(ele) 65 | except Exception as e: temp = "**Error**: " + str(e) 66 | print("bypassed:",temp) 67 | if temp is not None: links = links + temp + "\n" 68 | end = time() 69 | print("Took " + "{:.2f}".format(end-strt) + "sec") 70 | 71 | if otherss: 72 | try: 73 | app.send_photo(message.chat.id, message.photo.file_id, f'__{links}__', reply_to_message_id=message.id) 74 | app.delete_messages(message.chat.id,[msg.id]) 75 | return 76 | except: pass 77 | 78 | try: 79 | final = [] 80 | tmp = "" 81 | for ele in links.split("\n"): 82 | tmp += ele + "\n" 83 | if len(tmp) > 4000: 84 | final.append(tmp) 85 | tmp = "" 86 | final.append(tmp) 87 | app.delete_messages(message.chat.id, msg.id) 88 | tmsgid = message.id 89 | for ele in final: 90 | tmsg = app.send_message(message.chat.id, f'__{ele}__',reply_to_message_id=tmsgid, disable_web_page_preview=True) 91 | tmsgid = tmsg.id 92 | except Exception as e: 93 | print(e) 94 | try: app.send_message(message.chat.id, "__Failed to Bypass__", reply_to_message_id=message.id) 95 | except: 96 | app.send_message(message.chat.id, "__Failed to Bypass__", reply_to_message_id=message.id) 97 | 98 | 99 | # start command 100 | @app.on_message(filters.command(["start"])) 101 | def send_start(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message): 102 | app.send_message(message.chat.id, f"__👋 Hi **{message.from_user.mention}**, I am Link Bypasser Bot. Just send me any supported link and I will get you the results.\nCheckout /help to Read More__", 103 | reply_markup=InlineKeyboardMarkup([ 104 | [ InlineKeyboardButton("🌐 Source Code",
url="https://github.com/bipinkrish/Link-Bypasser-Bot")], 105 | [ InlineKeyboardButton("Replit", url="https://replit.com/@bipinkrish/Link-Bypasser#app.py") ]]), 106 | reply_to_message_id=message.id) 107 | 108 | 109 | # help command 110 | @app.on_message(filters.command(["help"])) 111 | def send_help(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message): 112 | app.send_message(message.chat.id, HELP_TEXT, reply_to_message_id=message.id, disable_web_page_preview=True) 113 | 114 | 115 | # links 116 | @app.on_message(filters.text) 117 | def receive(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message): 118 | bypass = Thread(target=lambda:loopthread(message),daemon=True) 119 | bypass.start() 120 | 121 | 122 | # doc thread 123 | def docthread(message): 124 | msg = app.send_message(message.chat.id, "🔎 __bypassing...__", reply_to_message_id=message.id) 125 | print("sent DLC file") 126 | file = app.download_media(message) 127 | dlccont = open(file,"r").read() 128 | links = bypasser.getlinks(dlccont) 129 | app.edit_message_text(message.chat.id, msg.id, f'__{links}__', disable_web_page_preview=True) 130 | remove(file) 131 | 132 | 133 | # files 134 | @app.on_message([filters.document,filters.photo,filters.video]) 135 | def docfile(client: pyrogram.client.Client, message: pyrogram.types.messages_and_media.message.Message): 136 | 137 | try: 138 | if message.document.file_name.endswith("dlc"): 139 | bypass = Thread(target=lambda:docthread(message),daemon=True) 140 | bypass.start() 141 | return 142 | except: pass 143 | 144 | bypass = Thread(target=lambda:loopthread(message,True),daemon=True) 145 | bypass.start() 146 | 147 | 148 | # server loop 149 | print("Bot Starting") 150 | app.run() 151 | -------------------------------------------------------------------------------- /ddl.py: -------------------------------------------------------------------------------- 1 | from base64 import standard_b64encode 2 | from json import loads 3 | from math import floor, pow 4 | from re import findall, match, search, sub 5 | from time import sleep 6 | from urllib.parse import quote, unquote, urlparse 7 | from uuid import uuid4 8 | 9 | from bs4 import BeautifulSoup 10 | from cfscrape import create_scraper 11 | from lxml import etree 12 | from requests import get, session 13 | 14 | from json import load 15 | from os import environ 16 | 17 | with open('config.json', 'r') as f: DATA = load(f) 18 | def getenv(var): return environ.get(var) or DATA.get(var, None) 19 | 20 | 21 | UPTOBOX_TOKEN = getenv("UPTOBOX_TOKEN") 22 | ndus = getenv("TERA_COOKIE") 23 | if ndus is None: TERA_COOKIE = None 24 | else: TERA_COOKIE = {"ndus": ndus} 25 | 26 | 27 | ddllist = ['yadi.sk','disk.yandex.com','mediafire.com','uptobox.com','osdn.net','github.com', 28 | 'hxfile.co','1drv.ms','pixeldrain.com','antfiles.com','streamtape','racaty','1fichier.com', 29 | 'solidfiles.com','krakenfiles.com','mdisk.me','upload.ee','akmfiles','linkbox','shrdsk','letsupload.io', 30 | 'zippyshare.com','wetransfer.com','we.tl','terabox','nephobox','4funbox','mirrobox','momerybox', 31 | 'teraboxapp','sbembed.com','watchsb.com','streamsb.net','sbplay.org','filepress', 32 | 'fembed.net', 'fembed.com', 'femax20.com', 'fcdn.stream', 'feurl.com', 'layarkacaxxi.icu', 33 | 'naniplay.nanime.in', 'naniplay.nanime.biz', 'naniplay.com', 'mm9842.com','anonfiles.com', 34 | 'hotfile.io', 'bayfiles.com', 'megaupload.nz', 'letsupload.cc','filechan.org', 'myfile.is', 35 | 'vshare.is', 'rapidshare.nu', 
'lolabits.se','openload.cc', 'share-online.is', 'upvid.cc'] 36 | 37 | 38 | def is_share_link(url): 39 | return bool(match(r'https?:\/\/.+\.gdtot\.\S+|https?:\/\/(filepress|filebee|appdrive|gdflix|driveseed)\.\S+', url)) 40 | 41 | 42 | def get_readable_time(seconds): 43 | result = '' 44 | (days, remainder) = divmod(seconds, 86400) 45 | days = int(days) 46 | if days != 0: 47 | result += f'{days}d' 48 | (hours, remainder) = divmod(remainder, 3600) 49 | hours = int(hours) 50 | if hours != 0: 51 | result += f'{hours}h' 52 | (minutes, seconds) = divmod(remainder, 60) 53 | minutes = int(minutes) 54 | if minutes != 0: 55 | result += f'{minutes}m' 56 | seconds = int(seconds) 57 | result += f'{seconds}s' 58 | return result 59 | 60 | 61 | fmed_list = ['fembed.net', 'fembed.com', 'femax20.com', 'fcdn.stream', 'feurl.com', 'layarkacaxxi.icu', 62 | 'naniplay.nanime.in', 'naniplay.nanime.biz', 'naniplay.com', 'mm9842.com'] 63 | 64 | anonfilesBaseSites = ['anonfiles.com', 'hotfile.io', 'bayfiles.com', 'megaupload.nz', 'letsupload.cc', 65 | 'filechan.org', 'myfile.is', 'vshare.is', 'rapidshare.nu', 'lolabits.se', 66 | 'openload.cc', 'share-online.is', 'upvid.cc'] 67 | 68 | 69 | def direct_link_generator(link: str): 70 | """ direct links generator """ 71 | domain = urlparse(link).hostname 72 | if 'yadi.sk' in domain or 'disk.yandex.com' in domain: 73 | return yandex_disk(link) 74 | elif 'mediafire.com' in domain: 75 | return mediafire(link) 76 | elif 'uptobox.com' in domain: 77 | return uptobox(link) 78 | elif 'osdn.net' in domain: 79 | return osdn(link) 80 | elif 'github.com' in domain: 81 | return github(link) 82 | elif 'hxfile.co' in domain: 83 | return hxfile(link) 84 | elif '1drv.ms' in domain: 85 | return onedrive(link) 86 | elif 'pixeldrain.com' in domain: 87 | return pixeldrain(link) 88 | elif 'antfiles.com' in domain: 89 | return antfiles(link) 90 | elif 'streamtape' in domain: 91 | return streamtape(link) 92 | elif 'racaty' in domain: 93 | return racaty(link) 94 | elif '1fichier.com' in domain: 95 | return fichier(link) 96 | elif 'solidfiles.com' in domain: 97 | return solidfiles(link) 98 | elif 'krakenfiles.com' in domain: 99 | return krakenfiles(link) 100 | elif 'upload.ee' in domain: 101 | return uploadee(link) 102 | elif 'akmfiles' in domain: 103 | return akmfiles(link) 104 | elif 'linkbox' in domain: 105 | return linkbox(link) 106 | elif 'shrdsk' in domain: 107 | return shrdsk(link) 108 | elif 'letsupload.io' in domain: 109 | return letsupload(link) 110 | elif 'zippyshare.com' in domain: 111 | return zippyshare(link) 112 | elif 'mdisk.me' in domain: 113 | return mdisk(link) 114 | elif any(x in domain for x in ['wetransfer.com', 'we.tl']): 115 | return wetransfer(link) 116 | elif any(x in domain for x in anonfilesBaseSites): 117 | return anonfilesBased(link) 118 | elif any(x in domain for x in ['terabox', 'nephobox', '4funbox', 'mirrobox', 'momerybox', 'teraboxapp']): 119 | return terabox(link) 120 | elif any(x in domain for x in fmed_list): 121 | return fembed(link) 122 | elif any(x in domain for x in ['sbembed.com', 'watchsb.com', 'streamsb.net', 'sbplay.org']): 123 | return sbembed(link) 124 | elif is_share_link(link): 125 | if 'gdtot' in domain: 126 | return gdtot(link) 127 | elif 'filepress' in domain: 128 | return filepress(link) 129 | else: 130 | return sharer_scraper(link) 131 | else: 132 | return f'No Direct link function found for\n\n{link}\n\nuse /ddllist' 133 | 134 | 135 | def mdisk(url): 136 | header = { 137 | 'Accept': '*/*', 138 | 'Accept-Language': 'en-US,en;q=0.5', 139 | 
'Accept-Encoding': 'gzip, deflate, br', 140 | 'Referer': 'https://mdisk.me/', 141 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36' 142 | } 143 | id = url.split("/")[-1] 144 | URL = f'https://diskuploader.entertainvideo.com/v1/file/cdnurl?param={id}' 145 | return get(url=URL, headers=header).json()['source'] 146 | 147 | 148 | def yandex_disk(url: str) -> str: 149 | """ Yandex.Disk direct link generator 150 | Based on https://github.com/wldhx/yadisk-direct """ 151 | try: 152 | link = findall(r'\b(https?://(yadi.sk|disk.yandex.com)\S+)', url)[0][0] 153 | except IndexError: 154 | return "No Yandex.Disk links found\n" 155 | api = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key={}' 156 | cget = create_scraper().request 157 | try: 158 | return cget('get', api.format(link)).json()['href'] 159 | except KeyError: 160 | return ( 161 | "ERROR: File not found/Download limit reached") 162 | 163 | 164 | def uptobox(url: str) -> str: 165 | """ Uptobox direct link generator 166 | based on https://github.com/jovanzers/WinTenCermin and https://github.com/sinoobie/noobie-mirror """ 167 | try: 168 | link = findall(r'\bhttps?://.*uptobox\.com\S+', url)[0] 169 | except IndexError: 170 | return ("No Uptobox links found") 171 | link = findall(r'\bhttps?://.*\.uptobox\.com/dl\S+', url) 172 | if link: return link[0] 173 | cget = create_scraper().request 174 | try: 175 | file_id = findall(r'\bhttps?://.*uptobox\.com/(\w+)', url)[0] 176 | if UPTOBOX_TOKEN: 177 | file_link = f'https://uptobox.com/api/link?token={UPTOBOX_TOKEN}&file_code={file_id}' 178 | else: 179 | file_link = f'https://uptobox.com/api/link?file_code={file_id}' 180 | res = cget('get', file_link).json() 181 | except Exception as e: 182 | return (f"ERROR: {e.__class__.__name__}") 183 | if res['statusCode'] == 0: 184 | return res['data']['dlLink'] 185 | elif res['statusCode'] == 16: 186 | sleep(1) 187 | waiting_token = res["data"]["waitingToken"] 188 | sleep(res["data"]["waiting"]) 189 | elif res['statusCode'] == 39: 190 | return ( 191 | f"ERROR: Uptobox is being limited please wait {get_readable_time(res['data']['waiting'])}") 192 | else: 193 | return (f"ERROR: {res['message']}") 194 | try: 195 | res = cget('get', f"{file_link}&waitingToken={waiting_token}").json() 196 | return res['data']['dlLink'] 197 | except Exception as e: 198 | return (f"ERROR: {e.__class__.__name__}") 199 | 200 | 201 | def mediafire(url: str) -> str: 202 | final_link = findall(r'https?:\/\/download\d+\.mediafire\.com\/\S+\/\S+\/\S+', url) 203 | if final_link: return final_link[0] 204 | cget = create_scraper().request 205 | try: 206 | url = cget('get', url).url 207 | page = cget('get', url).text 208 | except Exception as e: 209 | return (f"ERROR: {e.__class__.__name__}") 210 | final_link = findall(r"\'(https?:\/\/download\d+\.mediafire\.com\/\S+\/\S+\/\S+)\'", page) 211 | if not final_link:return ("ERROR: No links found in this page") 212 | return final_link[0] 213 | 214 | 215 | def osdn(url: str) -> str: 216 | """ OSDN direct link generator """ 217 | osdn_link = 'https://osdn.net' 218 | try: 219 | link = findall(r'\bhttps?://.*osdn\.net\S+', url)[0] 220 | except IndexError: 221 | return ("No OSDN links found") 222 | cget = create_scraper().request 223 | try: 224 | page = BeautifulSoup( 225 | cget('get', link, allow_redirects=True).content, 'lxml') 226 | except Exception as e: 227 | return (f"ERROR: {e.__class__.__name__}") 228 | info = page.find('a', {'class': 
'mirror_link'}) 229 | link = unquote(osdn_link + info['href']) 230 | mirrors = page.find('form', {'id': 'mirror-select-form'}).findAll('tr') 231 | urls = [] 232 | for data in mirrors[1:]: 233 | mirror = data.find('input')['value'] 234 | urls.append(sub(r'm=(.*)&f', f'm={mirror}&f', link)) 235 | return urls[0] 236 | 237 | 238 | def github(url: str) -> str: 239 | """ GitHub direct links generator """ 240 | try: 241 | findall(r'\bhttps?://.*github\.com.*releases\S+', url)[0] 242 | except IndexError: 243 | return ("No GitHub Releases links found") 244 | cget = create_scraper().request 245 | download = cget('get', url, stream=True, allow_redirects=False) 246 | try: 247 | return download.headers["location"] 248 | except KeyError: 249 | return ("ERROR: Can't extract the link") 250 | 251 | 252 | def hxfile(url: str) -> str: 253 | sess = session() 254 | try: 255 | headers = { 256 | 'content-type': 'application/x-www-form-urlencoded', 257 | 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.152 Safari/537.36', 258 | } 259 | 260 | data = { 261 | 'op': 'download2', 262 | 'id': urlparse(url).path.strip("/"), 263 | 'rand': '', 264 | 'referer': '', 265 | 'method_free': '', 266 | 'method_premium': '', 267 | } 268 | 269 | response = sess.post(url, headers=headers, data=data) 270 | soup = BeautifulSoup(response.text, "html.parser") 271 | 272 | if (btn := soup.find(class_="btn btn-dow")): 273 | return btn["href"] 274 | if (unique := soup.find(id="uniqueExpirylink")): 275 | return unique["href"] 276 | 277 | except Exception as e: 278 | return (f"ERROR: {e.__class__.__name__}") 279 | 280 | 281 | def letsupload(url: str) -> str: 282 | cget = create_scraper().request 283 | try: 284 | res = cget("POST", url) 285 | except Exception as e: 286 | return (f'ERROR: {e.__class__.__name__}') 287 | direct_link = findall(r"(https?://letsupload\.io\/.+?)\'", res.text) 288 | if direct_link: return direct_link[0] 289 | else: 290 | return ('ERROR: Direct Link not found') 291 | 292 | 293 | def anonfilesBased(url: str) -> str: 294 | cget = create_scraper().request 295 | try: 296 | soup = BeautifulSoup(cget('get', url).content, 'lxml') 297 | except Exception as e: 298 | return (f"ERROR: {e.__class__.__name__}") 299 | sa = soup.find(id="download-url") 300 | if sa: return sa['href'] 301 | return ("ERROR: File not found!") 302 | 303 | 304 | def fembed(link: str) -> str: 305 | sess = session() 306 | try: 307 | url = link.replace("/v/", "/f/") 308 | raw = sess.get(url) 309 | api = search(r"(/api/source/[^\"']+)", raw.text) 310 | if api is not None: 311 | result = {} 312 | raw = sess.post( 313 | "https://layarkacaxxi.icu" + api.group(1)).json() 314 | for d in raw["data"]: 315 | f = d["file"] 316 | head = sess.head(f) 317 | direct = head.headers.get("Location", url) 318 | result[f"{d['label']}/{d['type']}"] = direct 319 | dl_url = result 320 | 321 | count = len(dl_url) 322 | lst_link = [dl_url[i] for i in dl_url] 323 | return lst_link[count-1] 324 | except Exception as e: 325 | return (f"ERROR: {e.__class__.__name__}") 326 | 327 | 328 | def sbembed(link: str) -> str: 329 | sess = session() 330 | try: 331 | raw = sess.get(link) 332 | soup = BeautifulSoup(raw,"html.parser") 333 | 334 | result = {} 335 | for a in soup.findAll("a", onclick=compile(r"^download_video[^>]+")): 336 | data = dict(zip(["id", "mode", "hash"], findall( 337 | r"[\"']([^\"']+)[\"']", a["onclick"]))) 338 | data["op"] = "download_orig" 339 | 340 | raw = sess.get("https://sbembed.com/dl", params=data) 341 | soup =
BeautifulSoup(raw,"html.parser") 342 | 343 | if (direct := soup.find("a", text=compile("(?i)^direct"))): 344 | result[a.text] = direct["href"] 345 | dl_url = result 346 | 347 | count = len(dl_url) 348 | lst_link = [dl_url[i] for i in dl_url] 349 | return lst_link[count-1] 350 | 351 | except Exception as e: 352 | return (f"ERROR: {e.__class__.__name__}") 353 | 354 | 355 | def onedrive(link: str) -> str: 356 | """ Onedrive direct link generator 357 | Based on https://github.com/UsergeTeam/Userge """ 358 | link_without_query = urlparse(link)._replace(query=None).geturl() 359 | direct_link_encoded = str(standard_b64encode( 360 | bytes(link_without_query, "utf-8")), "utf-8") 361 | direct_link1 = f"https://api.onedrive.com/v1.0/shares/u!{direct_link_encoded}/root/content" 362 | cget = create_scraper().request 363 | try: 364 | resp = cget('head', direct_link1) 365 | except Exception as e: 366 | return (f"ERROR: {e.__class__.__name__}") 367 | if resp.status_code != 302: 368 | return ( 369 | "ERROR: Unauthorized link, the link may be private") 370 | return resp.next.url 371 | 372 | 373 | def pixeldrain(url: str) -> str: 374 | """ Based on https://github.com/yash-dk/TorToolkit-Telegram """ 375 | url = url.strip("/ ") 376 | file_id = url.split("/")[-1] 377 | if url.split("/")[-2] == "l": 378 | info_link = f"https://pixeldrain.com/api/list/{file_id}" 379 | dl_link = f"https://pixeldrain.com/api/list/{file_id}/zip" 380 | else: 381 | info_link = f"https://pixeldrain.com/api/file/{file_id}/info" 382 | dl_link = f"https://pixeldrain.com/api/file/{file_id}" 383 | cget = create_scraper().request 384 | try: 385 | resp = cget('get', info_link).json() 386 | except Exception as e: 387 | return (f"ERROR: {e.__class__.__name__}") 388 | if resp["success"]: 389 | return dl_link 390 | else: 391 | return ( 392 | f"ERROR: Cant't download due {resp['message']}.") 393 | 394 | 395 | def antfiles(url: str) -> str: 396 | sess = session() 397 | try: 398 | raw = sess.get(url) 399 | soup = BeautifulSoup(raw,"html.parser") 400 | 401 | if (a := soup.find(class_="main-btn", href=True)): 402 | return "{0.scheme}://{0.netloc}/{1}".format(urlparse(url), a["href"]) 403 | 404 | except Exception as e: 405 | return (f"ERROR: {e.__class__.__name__}") 406 | 407 | 408 | def streamtape(url: str) -> str: 409 | response = get(url) 410 | 411 | if (videolink := findall(r"document.*((?=id\=)[^\"']+)", response.text)): 412 | nexturl = "https://streamtape.com/get_video?" 
+ videolink[-1] 413 | try: return nexturl 414 | except Exception as e: return (f"ERROR: {e.__class__.__name__}") 415 | 416 | 417 | def racaty(url: str) -> str: 418 | """ Racaty direct link generator 419 | By https://github.com/junedkh """ 420 | cget = create_scraper().request 421 | try: 422 | url = cget('GET', url).url 423 | json_data = { 424 | 'op': 'download2', 425 | 'id': url.split('/')[-1] 426 | } 427 | res = cget('POST', url, data=json_data) 428 | except Exception as e: 429 | return (f'ERROR: {e.__class__.__name__}') 430 | html_tree = etree.HTML(res.text) 431 | direct_link = html_tree.xpath("//a[contains(@id,'uniqueExpirylink')]/@href") 432 | if direct_link: 433 | return direct_link[0] 434 | else: 435 | return ('ERROR: Direct link not found') 436 | 437 | 438 | def fichier(link: str) -> str: 439 | """ 1Fichier direct link generator 440 | Based on https://github.com/Maujar 441 | """ 442 | regex = r"^([http:\/\/|https:\/\/]+)?.*1fichier\.com\/\?.+" 443 | gan = match(regex, link) 444 | if not gan: 445 | return ( 446 | "ERROR: The link you entered is wrong!") 447 | if "::" in link: 448 | pswd = link.split("::")[-1] 449 | url = link.split("::")[-2] 450 | else: 451 | pswd = None 452 | url = link 453 | cget = create_scraper().request 454 | try: 455 | if pswd is None: 456 | req = cget('post', url) 457 | else: 458 | pw = {"pass": pswd} 459 | req = cget('post', url, data=pw) 460 | except Exception as e: 461 | return (f"ERROR: {e.__class__.__name__}") 462 | if req.status_code == 404: 463 | return ( 464 | "ERROR: File not found/The link you entered is wrong!") 465 | soup = BeautifulSoup(req.content, 'lxml') 466 | if soup.find("a", {"class": "ok btn-general btn-orange"}): 467 | dl_url = soup.find("a", {"class": "ok btn-general btn-orange"})["href"] 468 | if dl_url: return dl_url 469 | return ( 470 | "ERROR: Unable to generate Direct Link 1fichier!") 471 | elif len(soup.find_all("div", {"class": "ct_warn"})) == 3: 472 | str_2 = soup.find_all("div", {"class": "ct_warn"})[-1] 473 | if "you must wait" in str(str_2).lower(): 474 | numbers = [int(word) for word in str(str_2).split() if word.isdigit()] 475 | if numbers: return ( 476 | f"ERROR: 1fichier is on a limit. Please wait {numbers[0]} minute.") 477 | else: 478 | return ( 479 | "ERROR: 1fichier is on a limit. Please wait a few minutes/hour.") 480 | elif "protect access" in str(str_2).lower(): 481 | return ( 482 | "ERROR: This link requires a password!\n\nThis link requires a password!\n- Insert sign :: after the link and write the password after the sign.\n\nExample: https://1fichier.com/?smmtd8twfpm66awbqz04::love you\n\n* No spaces between the signs ::\n* For the password, you can use a space!") 483 | else: 484 | return ( 485 | "ERROR: Failed to generate Direct Link from 1fichier!") 486 | elif len(soup.find_all("div", {"class": "ct_warn"})) == 4: 487 | str_1 = soup.find_all("div", {"class": "ct_warn"})[-2] 488 | str_3 = soup.find_all("div", {"class": "ct_warn"})[-1] 489 | if "you must wait" in str(str_1).lower(): 490 | numbers = [int(word) for word in str(str_1).split() if word.isdigit()] 491 | if numbers: return ( 492 | f"ERROR: 1fichier is on a limit. Please wait {numbers[0]} minute.") 493 | else: 494 | return ( 495 | "ERROR: 1fichier is on a limit. 
Please wait a few minutes/hour.") 496 | elif "bad password" in str(str_3).lower(): 497 | return ( 498 | "ERROR: The password you entered is wrong!") 499 | else: 500 | return ( 501 | "ERROR: Error trying to generate Direct Link from 1fichier!") 502 | else: 503 | return ( 504 | "ERROR: Error trying to generate Direct Link from 1fichier!") 505 | 506 | 507 | def solidfiles(url: str) -> str: 508 | """ Solidfiles direct link generator 509 | Based on https://github.com/Xonshiz/SolidFiles-Downloader 510 | By https://github.com/Jusidama18 """ 511 | cget = create_scraper().request 512 | try: 513 | headers = { 514 | 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1985.125 Safari/537.36' 515 | } 516 | pageSource = cget('get', url, headers=headers).text 517 | mainOptions = str( 518 | search(r'viewerOptions\'\,\ (.*?)\)\;', pageSource).group(1)) 519 | return loads(mainOptions)["downloadUrl"] 520 | except Exception as e: 521 | return (f"ERROR: {e.__class__.__name__}") 522 | 523 | 524 | def krakenfiles(page_link: str) -> str: 525 | """ krakenfiles direct link generator 526 | Based on https://github.com/tha23rd/py-kraken 527 | By https://github.com/junedkh """ 528 | cget = create_scraper().request 529 | try: 530 | page_resp = cget('get', page_link) 531 | except Exception as e: 532 | return (f"ERROR: {e.__class__.__name__}") 533 | soup = BeautifulSoup(page_resp.text, "lxml") 534 | try: 535 | token = soup.find("input", id="dl-token")["value"] 536 | except: 537 | return ( 538 | f"ERROR: Page link is wrong: {page_link}") 539 | hashes = [ 540 | item["data-file-hash"] 541 | for item in soup.find_all("div", attrs={"data-file-hash": True}) 542 | ] 543 | if not hashes: 544 | return ( 545 | f"ERROR: Hash not found for : {page_link}") 546 | dl_hash = hashes[0] 547 | payload = f'------WebKitFormBoundary7MA4YWxkTrZu0gW\r\nContent-Disposition: form-data; name="token"\r\n\r\n{token}\r\n------WebKitFormBoundary7MA4YWxkTrZu0gW--' 548 | headers = { 549 | "content-type": "multipart/form-data; boundary=----WebKitFormBoundary7MA4YWxkTrZu0gW", 550 | "cache-control": "no-cache", 551 | "hash": dl_hash, 552 | } 553 | dl_link_resp = cget( 554 | 'post', f"https://krakenfiles.com/download/{hash}", data=payload, headers=headers) 555 | dl_link_json = dl_link_resp.json() 556 | if "url" in dl_link_json: 557 | return dl_link_json["url"] 558 | else: 559 | return ( 560 | f"ERROR: Failed to acquire download URL from kraken for : {page_link}") 561 | 562 | 563 | def uploadee(url: str) -> str: 564 | """ uploadee direct link generator 565 | By https://github.com/iron-heart-x""" 566 | cget = create_scraper().request 567 | try: 568 | soup = BeautifulSoup(cget('get', url).content, 'lxml') 569 | sa = soup.find('a', attrs={'id': 'd_l'}) 570 | return sa['href'] 571 | except: 572 | return ( 573 | f"ERROR: Failed to acquire download URL from upload.ee for : {url}") 574 | 575 | 576 | def terabox(url) -> str: 577 | sess = session() 578 | while True: 579 | try: 580 | res = sess.get(url) 581 | print("connected") 582 | break 583 | except: print("retrying") 584 | url = res.url 585 | 586 | key = url.split('?surl=')[-1] 587 | url = f'http://www.terabox.com/wap/share/filelist?surl={key}' 588 | sess.cookies.update(TERA_COOKIE) 589 | 590 | while True: 591 | try: 592 | res = sess.get(url) 593 | print("connected") 594 | break 595 | except Exception as e: print("retrying") 596 | 597 | key = res.url.split('?surl=')[-1] 598 | soup = BeautifulSoup(res.content, 'lxml') 599 | jsToken = None 600 | 601 | for 
fs in soup.find_all('script'): 602 | fstring = fs.string 603 | if fstring and fstring.startswith('try {eval(decodeURIComponent'): 604 | jsToken = fstring.split('%22')[1] 605 | 606 | while True: 607 | try: 608 | res = sess.get(f'https://www.terabox.com/share/list?app_id=250528&jsToken={jsToken}&shorturl={key}&root=1') 609 | print("connected") 610 | break 611 | except: print("retrying") 612 | result = res.json() 613 | 614 | if result['errno'] != 0: return f"ERROR: '{result['errmsg']}' Check cookies" 615 | result = result['list'] 616 | if len(result) > 1: return "ERROR: Can't download mutiple files" 617 | result = result[0] 618 | 619 | if result['isdir'] != '0':return "ERROR: Can't download folder" 620 | return result.get('dlink',"Error") 621 | 622 | 623 | def filepress(url): 624 | cget = create_scraper().request 625 | try: 626 | url = cget('GET', url).url 627 | raw = urlparse(url) 628 | json_data = { 629 | 'id': raw.path.split('/')[-1], 630 | 'method': 'publicDownlaod', 631 | } 632 | api = f'{raw.scheme}://{raw.hostname}/api/file/downlaod/' 633 | res = cget('POST', api, headers={ 634 | 'Referer': f'{raw.scheme}://{raw.hostname}'}, json=json_data).json() 635 | except Exception as e: 636 | return (f'ERROR: {e.__class__.__name__}') 637 | if 'data' not in res: 638 | return (f'ERROR: {res["statusText"]}') 639 | return f'https://drive.google.com/uc?id={res["data"]}' 640 | 641 | 642 | def gdtot(url): 643 | cget = create_scraper().request 644 | try: 645 | res = cget('GET', f'https://gdbot.xyz/file/{url.split("/")[-1]}') 646 | except Exception as e: 647 | return (f'ERROR: {e.__class__.__name__}') 648 | token_url = etree.HTML(res.content).xpath( 649 | "//a[contains(@class,'inline-flex items-center justify-center')]/@href") 650 | if not token_url: 651 | try: 652 | url = cget('GET', url).url 653 | p_url = urlparse(url) 654 | res = cget( 655 | "GET", f"{p_url.scheme}://{p_url.hostname}/ddl/{url.split('/')[-1]}") 656 | except Exception as e: 657 | return (f'ERROR: {e.__class__.__name__}') 658 | drive_link = findall(r"myDl\('(.*?)'\)", res.text) 659 | if drive_link and "drive.google.com" in drive_link[0]: 660 | return drive_link[0] 661 | else: 662 | return ( 663 | 'ERROR: Drive Link not found, Try in your broswer') 664 | token_url = token_url[0] 665 | try: 666 | token_page = cget('GET', token_url) 667 | except Exception as e: 668 | return ( 669 | f'ERROR: {e.__class__.__name__} with {token_url}') 670 | path = findall('\("(.*?)"\)', token_page.text) 671 | if not path: 672 | return ('ERROR: Cannot bypass this') 673 | path = path[0] 674 | raw = urlparse(token_url) 675 | final_url = f'{raw.scheme}://{raw.hostname}{path}' 676 | return sharer_scraper(final_url) 677 | 678 | 679 | def sharer_scraper(url): 680 | cget = create_scraper().request 681 | try: 682 | url = cget('GET', url).url 683 | raw = urlparse(url) 684 | header = { 685 | "useragent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/7.0.548.0 Safari/534.10"} 686 | res = cget('GET', url, headers=header) 687 | except Exception as e: 688 | return (f'ERROR: {e.__class__.__name__}') 689 | key = findall('"key",\s+"(.*?)"', res.text) 690 | if not key: 691 | return ("ERROR: Key not found!") 692 | key = key[0] 693 | if not etree.HTML(res.content).xpath("//button[@id='drc']"): 694 | return ( 695 | "ERROR: This link don't have direct download button") 696 | boundary = uuid4() 697 | headers = { 698 | 'Content-Type': f'multipart/form-data; boundary=----WebKitFormBoundary{boundary}', 699 | 'x-token': raw.hostname, 700 | 
'useragent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/7.0.548.0 Safari/534.10' 701 | } 702 | 703 | data = f'------WebKitFormBoundary{boundary}\r\nContent-Disposition: form-data; name="action"\r\n\r\ndirect\r\n' \ 704 | f'------WebKitFormBoundary{boundary}\r\nContent-Disposition: form-data; name="key"\r\n\r\n{key}\r\n' \ 705 | f'------WebKitFormBoundary{boundary}\r\nContent-Disposition: form-data; name="action_token"\r\n\r\n\r\n' \ 706 | f'------WebKitFormBoundary{boundary}--\r\n' 707 | try: 708 | res = cget("POST", url, cookies=res.cookies, 709 | headers=headers, data=data).json() 710 | except Exception as e: 711 | return (f'ERROR: {e.__class__.__name__}') 712 | if "url" not in res: 713 | return ( 714 | 'ERROR: Drive Link not found, Try in your broswer') 715 | if "drive.google.com" in res["url"]: 716 | return res["url"] 717 | try: 718 | res = cget('GET', res["url"]) 719 | except Exception as e: 720 | return (f'ERROR: {e.__class__.__name__}') 721 | html_tree = etree.HTML(res.content) 722 | drive_link = html_tree.xpath("//a[contains(@class,'btn')]/@href") 723 | if drive_link and "drive.google.com" in drive_link[0]: 724 | return drive_link[0] 725 | else: 726 | return ( 727 | 'ERROR: Drive Link not found, Try in your broswer') 728 | 729 | 730 | def wetransfer(url): 731 | cget = create_scraper().request 732 | try: 733 | url = cget('GET', url).url 734 | json_data = { 735 | 'security_hash': url.split('/')[-1], 736 | 'intent': 'entire_transfer' 737 | } 738 | res = cget( 739 | 'POST', f'https://wetransfer.com/api/v4/transfers/{url.split("/")[-2]}/download', json=json_data).json() 740 | except Exception as e: 741 | return (f'ERROR: {e.__class__.__name__}') 742 | if "direct_link" in res: 743 | return res["direct_link"] 744 | elif "message" in res: 745 | return (f"ERROR: {res['message']}") 746 | elif "error" in res: 747 | return (f"ERROR: {res['error']}") 748 | else: 749 | return ("ERROR: cannot find direct link") 750 | 751 | 752 | def akmfiles(url): 753 | cget = create_scraper().request 754 | try: 755 | url = cget('GET', url).url 756 | json_data = { 757 | 'op': 'download2', 758 | 'id': url.split('/')[-1] 759 | } 760 | res = cget('POST', url, data=json_data) 761 | except Exception as e: 762 | return (f'ERROR: {e.__class__.__name__}') 763 | html_tree = etree.HTML(res.content) 764 | direct_link = html_tree.xpath("//a[contains(@class,'btn btn-dow')]/@href") 765 | if direct_link: 766 | return direct_link[0] 767 | else: 768 | return ('ERROR: Direct link not found') 769 | 770 | 771 | def shrdsk(url): 772 | cget = create_scraper().request 773 | try: 774 | url = cget('GET', url).url 775 | res = cget( 776 | 'GET', f'https://us-central1-affiliate2apk.cloudfunctions.net/get_data?shortid={url.split("/")[-1]}') 777 | except Exception as e: 778 | return (f'ERROR: {e.__class__.__name__}') 779 | if res.status_code != 200: 780 | return ( 781 | f'ERROR: Status Code {res.status_code}') 782 | res = res.json() 783 | if ("type" in res and res["type"].lower() == "upload" and "video_url" in res): 784 | return res["video_url"] 785 | return ("ERROR: cannot find direct link") 786 | 787 | 788 | def linkbox(url): 789 | cget = create_scraper().request 790 | try: 791 | url = cget('GET', url).url 792 | res = cget( 793 | 'GET', f'https://www.linkbox.to/api/file/detail?itemId={url.split("/")[-1]}').json() 794 | except Exception as e: 795 | return (f'ERROR: {e.__class__.__name__}') 796 | if 'data' not in res: 797 | return ('ERROR: Data not found!!') 798 | data = res['data'] 799 | if 
not data: 800 | return ('ERROR: Data is None!!') 801 | if 'itemInfo' not in data: 802 | return ('ERROR: itemInfo not found!!') 803 | itemInfo = data['itemInfo'] 804 | if 'url' not in itemInfo: 805 | return ('ERROR: url not found in itemInfo!!') 806 | if "name" not in itemInfo: 807 | return ( 808 | 'ERROR: Name not found in itemInfo!!') 809 | name = quote(itemInfo["name"]) 810 | raw = itemInfo['url'].split("/", 3)[-1] 811 | return f'https://wdl.nuplink.net/{raw}&filename={name}' 812 | 813 | 814 | def zippyshare(url): 815 | cget = create_scraper().request 816 | try: 817 | url = cget('GET', url).url 818 | resp = cget('GET', url) 819 | except Exception as e: 820 | return (f'ERROR: {e.__class__.__name__}') 821 | if not resp.ok: 822 | return ( 823 | 'ERROR: Something went wrong!!, Try in your browser') 824 | if findall(r'>File does not exist on this server<', resp.text): 825 | return ( 826 | 'ERROR: File does not exist on server!!, Try in your browser') 827 | pages = etree.HTML(resp.text).xpath( 828 | "//script[contains(text(),'dlbutton')][3]/text()") 829 | if not pages: 830 | return ('ERROR: Page not found!!') 831 | js_script = pages[0] 832 | uri1 = None 833 | uri2 = None 834 | method = '' 835 | omg = findall(r"\.omg.=.(.*?);", js_script) 836 | var_a = findall(r"var.a.=.(\d+)", js_script) 837 | var_ab = findall(r"var.[ab].=.(\d+)", js_script) 838 | unknown = findall(r"\+\((.*?).\+", js_script) 839 | unknown1 = findall(r"\+.\((.*?)\).\+", js_script) 840 | if omg: 841 | omg = omg[0] 842 | method = f'omg = {omg}' 843 | mtk = (eval(omg) * (int(omg.split("%")[0]) % 3)) + 18 844 | uri1 = findall(r'"/(d/\S+)/"', js_script) 845 | uri2 = findall(r'\/d.*?\+"/(\S+)";', js_script) 846 | elif var_a: 847 | var_a = var_a[0] 848 | method = f'var_a = {var_a}' 849 | mtk = int(pow(int(var_a), 3) + 3) 850 | uri1 = findall(r"\.href.=.\"/(.*?)/\"", js_script) 851 | uri2 = findall(r"\+\"/(.*?)\"", js_script) 852 | elif var_ab: 853 | a = var_ab[0] 854 | b = var_ab[1] 855 | method = f'a = {a}, b = {b}' 856 | mtk = eval(f"{floor(int(a)/3) + int(a) % int(b)}") 857 | uri1 = findall(r"\.href.=.\"/(.*?)/\"", js_script) 858 | uri2 = findall(r"\)\+\"/(.*?)\"", js_script) 859 | elif unknown: 860 | method = f'unknown = {unknown[0]}' 861 | mtk = eval(f"{unknown[0]}+ 11") 862 | uri1 = findall(r"\.href.=.\"/(.*?)/\"", js_script) 863 | uri2 = findall(r"\)\+\"/(.*?)\"", js_script) 864 | elif unknown1: 865 | method = f'unknown1 = {unknown1[0]}' 866 | mtk = eval(unknown1[0]) 867 | uri1 = findall(r"\.href.=.\"/(.*?)/\"", js_script) 868 | uri2 = findall(r"\+.\"/(.*?)\"", js_script) 869 | else: 870 | return ("ERROR: Direct link not found") 871 | if not any([uri1, uri2]): 872 | return ( 873 | f"ERROR: uri1 or uri2 not found with method {method}") 874 | domain = urlparse(url).hostname 875 | return f"https://{domain}/{uri1[0]}/{mtk}/{uri2[0]}" 876 | -------------------------------------------------------------------------------- /bypasser.py: -------------------------------------------------------------------------------- 1 | import re 2 | import requests 3 | import base64 4 | from urllib.parse import unquote, urlparse, quote 5 | import time 6 | import cloudscraper 7 | from bs4 import BeautifulSoup, NavigableString, Tag 8 | from lxml import etree 9 | import hashlib 10 | import json 11 | from asyncio import sleep as asleep 12 | import ddl 13 | from cfscrape import create_scraper 14 | from json import load 15 | from os import environ 16 | 17 | with open('config.json', 'r') as f: DATA = load(f) 18 | def getenv(var): return environ.get(var) or 
DATA.get(var, None) 19 | 20 | 21 | ########################################################## 22 | # ENVs 23 | 24 | GDTot_Crypt = getenv("CRYPT") 25 | Laravel_Session = getenv("Laravel_Session") 26 | XSRF_TOKEN = getenv("XSRF_TOKEN") 27 | DCRYPT = getenv("DRIVEFIRE_CRYPT") 28 | KCRYPT = getenv("KOLOP_CRYPT") 29 | HCRYPT = getenv("HUBDRIVE_CRYPT") 30 | KATCRYPT = getenv("KATDRIVE_CRYPT") 31 | CF = getenv("CLOUDFLARE") 32 | 33 | ############################################################ 34 | # Lists 35 | 36 | otherslist = ["exe.io","exey.io","sub2unlock.net","sub2unlock.com","rekonise.com","letsboost.net","ph.apps2app.com","mboost.me", 37 | "sub4unlock.com","ytsubme.com","social-unlock.com","boost.ink","goo.gl","shrto.ml","t.co"] 38 | 39 | gdlist = ["appdrive","driveapp","drivehub","gdflix","drivesharer","drivebit","drivelinks","driveace", 40 | "drivepro","driveseed"] 41 | 42 | 43 | ############################################################### 44 | # pdisk 45 | 46 | def pdisk(url): 47 | r = requests.get(url).text 48 | try: return r.split("")[0] 49 | except: 50 | try:return BeautifulSoup(r,"html.parser").find('video').find("source").get("src") 51 | except: return None 52 | 53 | ############################################################### 54 | # index scrapper 55 | 56 | def scrapeIndex(url, username="none", password="none"): 57 | 58 | def authorization_token(username, password): 59 | user_pass = f"{username}:{password}" 60 | return f"Basic {base64.b64encode(user_pass.encode()).decode()}" 61 | 62 | 63 | def decrypt(string): 64 | return base64.b64decode(string[::-1][24:-20]).decode('utf-8') 65 | 66 | 67 | def func(payload_input, url, username, password): 68 | next_page = False 69 | next_page_token = "" 70 | 71 | url = f"{url}/" if url[-1] != '/' else url 72 | 73 | try: headers = {"authorization":authorization_token(username,password)} 74 | except: return "username/password combination is wrong", None, None 75 | 76 | encrypted_response = requests.post(url, data=payload_input, headers=headers) 77 | if encrypted_response.status_code == 401: return "username/password combination is wrong", None, None 78 | 79 | try: decrypted_response = json.loads(decrypt(encrypted_response.text)) 80 | except: return "something went wrong. 
check index link/username/password field again", None, None 81 | 82 | page_token = decrypted_response["nextPageToken"] 83 | if page_token is None: 84 | next_page = False 85 | else: 86 | next_page = True 87 | next_page_token = page_token 88 | 89 | 90 | if list(decrypted_response.get("data").keys())[0] != "error": 91 | file_length = len(decrypted_response["data"]["files"]) 92 | result = "" 93 | 94 | for i, _ in enumerate(range(file_length)): 95 | files_type = decrypted_response["data"]["files"][i]["mimeType"] 96 | if files_type != "application/vnd.google-apps.folder": 97 | files_name = decrypted_response["data"]["files"][i]["name"] 98 | 99 | direct_download_link = url + quote(files_name) 100 | result += f"• {files_name} :\n{direct_download_link}\n\n" 101 | return result, next_page, next_page_token 102 | 103 | def format(result): 104 | long_string = ''.join(result) 105 | new_list = [] 106 | 107 | while len(long_string) > 0: 108 | if len(long_string) > 4000: 109 | split_index = long_string.rfind("\n\n", 0, 4000) 110 | if split_index == -1: 111 | split_index = 4000 112 | else: 113 | split_index = len(long_string) 114 | 115 | new_list.append(long_string[:split_index]) 116 | long_string = long_string[split_index:].lstrip("\n\n") 117 | 118 | return new_list 119 | 120 | # main 121 | x = 0 122 | next_page = False 123 | next_page_token = "" 124 | result = [] 125 | 126 | payload = {"page_token":next_page_token, "page_index": x} 127 | print(f"Index Link: {url}\n") 128 | temp, next_page, next_page_token = func(payload, url, username, password) 129 | if temp is not None: result.append(temp) 130 | 131 | while next_page == True: 132 | payload = {"page_token":next_page_token, "page_index": x} 133 | temp, next_page, next_page_token = func(payload, url, username, password) 134 | if temp is not None: result.append(temp) 135 | x += 1 136 | 137 | if len(result)==0: return None 138 | return format(result) 139 | 140 | 141 | ############################################################## 142 | # tnlink 143 | 144 | def tnlink(url): 145 | client = requests.session() 146 | DOMAIN = "https://page.tnlink.in/" 147 | url = url[:-1] if url[-1] == '/' else url 148 | code = url.split("/")[-1] 149 | final_url = f"{DOMAIN}/{code}" 150 | ref = "https://usanewstoday.club/" 151 | h = {"referer": ref} 152 | while len(client.cookies) == 0: 153 | resp = client.get(final_url,headers=h) 154 | time.sleep(2) 155 | soup = BeautifulSoup(resp.content, "html.parser") 156 | inputs = soup.find_all("input") 157 | data = { input.get('name'): input.get('value') for input in inputs } 158 | h = { "x-requested-with": "XMLHttpRequest" } 159 | time.sleep(8) 160 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 161 | try: return r.json()['url'] 162 | except: return "Something went wrong :(" 163 | 164 | 165 | ############################################################### 166 | # psa 167 | 168 | def try2link_bypass(url): 169 | client = cloudscraper.create_scraper(allow_brotli=False) 170 | 171 | url = url[:-1] if url[-1] == '/' else url 172 | 173 | params = (('d', int(time.time()) + (60 * 4)),) 174 | r = client.get(url, params=params, headers= {'Referer': 'https://newforex.online/'}) 175 | 176 | soup = BeautifulSoup(r.text, 'html.parser') 177 | inputs = soup.find(id="go-link").find_all(name="input") 178 | data = { input.get('name'): input.get('value') for input in inputs } 179 | time.sleep(7) 180 | 181 | headers = {'Host': 'try2link.com', 'X-Requested-With': 'XMLHttpRequest', 'Origin': 'https://try2link.com', 'Referer': url} 182 | 183 | 
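# POST the scraped hidden-form fields to try2link's /links/go endpoint; the JSON response carries the bypassed destination URL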
bypassed_url = client.post('https://try2link.com/links/go', headers=headers,data=data) 184 | return bypassed_url.json()["url"] 185 | 186 | 187 | def try2link_scrape(url): 188 | client = cloudscraper.create_scraper(allow_brotli=False) 189 | h = { 190 | 'upgrade-insecure-requests': '1', 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36', 191 | } 192 | res = client.get(url, cookies={}, headers=h) 193 | url = 'https://try2link.com/'+re.findall('try2link\.com\/(.*?) ', res.text)[0] 194 | return try2link_bypass(url) 195 | 196 | 197 | def psa_bypasser(psa_url): 198 | cookies = {'cf_clearance': CF } 199 | headers = { 200 | 'authority': 'psa.wf', 201 | 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7', 202 | 'accept-language': 'en-US,en;q=0.9', 203 | 'referer': 'https://psa.wf/', 204 | 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36', 205 | } 206 | 207 | r = requests.get(psa_url, headers=headers, cookies=cookies) 208 | soup = BeautifulSoup(r.text, "html.parser").find_all(class_="dropshadowboxes-drop-shadow dropshadowboxes-rounded-corners dropshadowboxes-inside-and-outside-shadow dropshadowboxes-lifted-both dropshadowboxes-effect-default") 209 | links = [] 210 | for link in soup: 211 | try: 212 | exit_gate = link.a.get("href") 213 | if "/exit" in exit_gate: 214 | print("scraping :",exit_gate) 215 | links.append(try2link_scrape(exit_gate)) 216 | except: pass 217 | 218 | finals = "" 219 | for li in links: 220 | try: 221 | res = requests.get(li, headers=headers, cookies=cookies) 222 | soup = BeautifulSoup(res.text,"html.parser") 223 | name = soup.find("h1",class_="entry-title", itemprop="headline").getText() 224 | finals += "**" + name + "**\n\n" 225 | soup = soup.find("div", class_="entry-content" ,itemprop="text").findAll("a") 226 | for ele in soup: finals += "○ " + ele.get("href") + "\n" 227 | finals += "\n\n" 228 | except: finals += li + "\n\n" 229 | return finals 230 | 231 | 232 | ################################################################################################################## 233 | # rocklinks 234 | 235 | def rocklinks(url): 236 | client = cloudscraper.create_scraper(allow_brotli=False) 237 | if 'rocklinks.net' in url: 238 | DOMAIN = "https://blog.disheye.com" 239 | else: 240 | DOMAIN = "https://rocklinks.net" 241 | 242 | url = url[:-1] if url[-1] == '/' else url 243 | 244 | code = url.split("/")[-1] 245 | if 'rocklinks.net' in url: 246 | final_url = f"{DOMAIN}/{code}?quelle=" 247 | else: 248 | final_url = f"{DOMAIN}/{code}" 249 | 250 | resp = client.get(final_url) 251 | soup = BeautifulSoup(resp.content, "html.parser") 252 | 253 | try: inputs = soup.find(id="go-link").find_all(name="input") 254 | except: return "Incorrect Link" 255 | 256 | data = { input.get('name'): input.get('value') for input in inputs } 257 | 258 | h = { "x-requested-with": "XMLHttpRequest" } 259 | 260 | time.sleep(10) 261 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 262 | try: 263 | return r.json()['url'] 264 | except: return "Something went wrong :(" 265 | 266 | 267 | ################################################ 268 | # igg games 269 | 270 | def decodeKey(encoded): 271 | key = '' 272 | 273 | i = len(encoded) // 2 - 5 274 | while i >= 0: 275 | key += encoded[i] 276 | i = i - 2 277 | 278 | i = len(encoded) // 2 + 4 279 | while i < 
len(encoded): 280 | key += encoded[i] 281 | i = i + 2 282 | 283 | return key 284 | 285 | def bypassBluemediafiles(url, torrent=False): 286 | headers = { 287 | 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0', 288 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8', 289 | 'Accept-Language': 'en-US,en;q=0.5', 290 | 'Alt-Used': 'bluemediafiles.com', 291 | 'Connection': 'keep-alive', 292 | 'Upgrade-Insecure-Requests': '1', 293 | 'Sec-Fetch-Dest': 'document', 294 | 'Sec-Fetch-Mode': 'navigate', 295 | 'Sec-Fetch-Site': 'none', 296 | 'Sec-Fetch-User': '?1', 297 | } 298 | 299 | res = requests.get(url, headers=headers) 300 | soup = BeautifulSoup(res.text, 'html.parser') 301 | script = str(soup.findAll('script')[3]) 302 | encodedKey = script.split('Create_Button("')[1].split('");')[0] 303 | 304 | headers = { 305 | 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0', 306 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8', 307 | 'Accept-Language': 'en-US,en;q=0.5', 308 | 'Referer': url, 309 | 'Alt-Used': 'bluemediafiles.com', 310 | 'Connection': 'keep-alive', 311 | 'Upgrade-Insecure-Requests': '1', 312 | 'Sec-Fetch-Dest': 'document', 313 | 'Sec-Fetch-Mode': 'navigate', 314 | 'Sec-Fetch-Site': 'same-origin', 315 | 'Sec-Fetch-User': '?1', 316 | } 317 | 318 | params = { 'url': decodeKey(encodedKey) } 319 | 320 | if torrent: 321 | res = requests.get('https://dl.pcgamestorrents.org/get-url.php', params=params, headers=headers) 322 | soup = BeautifulSoup(res.text,"html.parser") 323 | furl = soup.find("a",class_="button").get("href") 324 | 325 | else: 326 | res = requests.get('https://bluemediafiles.com/get-url.php', params=params, headers=headers) 327 | furl = res.url 328 | if "mega.nz" in furl: 329 | furl = furl.replace("mega.nz/%23!","mega.nz/file/").replace("!","#") 330 | 331 | return furl 332 | 333 | def igggames(url): 334 | res = requests.get(url) 335 | soup = BeautifulSoup(res.text,"html.parser") 336 | soup = soup.find("div",class_="uk-margin-medium-top").findAll("a") 337 | 338 | bluelist = [] 339 | for ele in soup: bluelist.append(ele.get('href')) 340 | bluelist = bluelist[3:-1] 341 | 342 | links = "" 343 | last = None 344 | fix = True 345 | for ele in bluelist: 346 | if ele == "https://igg-games.com/how-to-install-a-pc-game-and-update.html": 347 | fix = False 348 | links += "\n" 349 | if "bluemediafile" in ele: 350 | tmp = bypassBluemediafiles(ele) 351 | if fix: 352 | tt = tmp.split("/")[2] 353 | if last is not None and tt != last: links += "\n" 354 | last = tt 355 | links = links + "○ " + tmp + "\n" 356 | elif "pcgamestorrents.com" in ele: 357 | res = requests.get(ele) 358 | soup = BeautifulSoup(res.text,"html.parser") 359 | turl = soup.find("p",class_="uk-card uk-card-body uk-card-default uk-card-hover").find("a").get("href") 360 | links = links + "🧲 `" + bypassBluemediafiles(turl,True) + "`\n\n" 361 | elif ele != "https://igg-games.com/how-to-install-a-pc-game-and-update.html": 362 | if fix: 363 | tt = ele.split("/")[2] 364 | if last is not None and tt != last: links += "\n" 365 | last = tt 366 | links = links + "○ " + ele + "\n" 367 | 368 | return links[:-1] 369 | 370 | 371 | ############################################################### 372 | # htpmovies cinevood sharespark atishmkv 373 | 374 | def htpmovies(link): 375 | client = cloudscraper.create_scraper(allow_brotli=False) 376 | r = client.get(link, 
allow_redirects=True).text 377 | j = r.split('("')[-1] 378 | url = j.split('")')[0] 379 | param = url.split("/")[-1] 380 | DOMAIN = "https://go.theforyou.in" 381 | final_url = f"{DOMAIN}/{param}" 382 | resp = client.get(final_url) 383 | soup = BeautifulSoup(resp.content, "html.parser") 384 | try: inputs = soup.find(id="go-link").find_all(name="input") 385 | except: return "Incorrect Link" 386 | data = { input.get('name'): input.get('value') for input in inputs } 387 | h = { "x-requested-with": "XMLHttpRequest" } 388 | time.sleep(10) 389 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 390 | try: 391 | return r.json()['url'] 392 | except: return "Something went Wrong !!" 393 | 394 | 395 | def scrappers(link): 396 | 397 | try: link = re.match(r"((http|https)\:\/\/)?[a-zA-Z0-9\.\/\?\:@\-_=#]+\.([a-zA-Z]){2,6}([a-zA-Z0-9\.\&\/\?\:@\-_=#])*", link)[0] 398 | except TypeError: return 'Not a Valid Link.' 399 | links = [] 400 | 401 | if "sharespark" in link: 402 | gd_txt = "" 403 | res = requests.get("?action=printpage;".join(link.split('?'))) 404 | soup = BeautifulSoup(res.text, 'html.parser') 405 | for br in soup.findAll('br'): 406 | next_s = br.nextSibling 407 | if not (next_s and isinstance(next_s,NavigableString)): 408 | continue 409 | next2_s = next_s.nextSibling 410 | if next2_s and isinstance(next2_s,Tag) and next2_s.name == 'br': 411 | text = str(next_s).strip() 412 | if text: 413 | result = re.sub(r'(?m)^\(https://i.*', '', next_s) 414 | star = re.sub(r'(?m)^\*.*', ' ', result) 415 | extra = re.sub(r'(?m)^\(https://e.*', ' ', star) 416 | gd_txt += ', '.join(re.findall(r'(?m)^.*https://new1.gdtot.cfd/file/[0-9][^.]*', next_s)) + "\n\n" 417 | return gd_txt 418 | 419 | elif "htpmovies" in link and "/exit.php" in link: 420 | return htpmovies(link) 421 | 422 | elif "htpmovies" in link: 423 | prsd = "" 424 | links = [] 425 | res = requests.get(link) 426 | soup = BeautifulSoup(res.text, 'html.parser') 427 | x = soup.select('a[href^="/exit.php?url="]') 428 | y = soup.select('h5') 429 | z = unquote(link.split('/')[-2]).split('-')[0] if link.endswith('/') else unquote(link.split('/')[-1]).split('-')[0] 430 | 431 | for a in x: 432 | links.append(a['href']) 433 | prsd = f"Total Links Found : {len(links)}\n\n" 434 | 435 | msdcnt = -1 436 | for b in y: 437 | if str(b.string).lower().startswith(z.lower()): 438 | msdcnt += 1 439 | url = f"https://htpmovies.lol"+links[msdcnt] 440 | prsd += f"{msdcnt+1}. 
{b.string}\n{htpmovies(url)}\n\n" 441 | asleep(5) 442 | return prsd 443 | 444 | elif "cinevood" in link: 445 | prsd = "" 446 | links = [] 447 | res = requests.get(link) 448 | soup = BeautifulSoup(res.text, 'html.parser') 449 | x = soup.select('a[href^="https://kolop.icu/file"]') 450 | for a in x: 451 | links.append(a['href']) 452 | for o in links: 453 | res = requests.get(o) 454 | soup = BeautifulSoup(res.content, "html.parser") 455 | title = soup.title.string 456 | reftxt = re.sub(r'Kolop \| ', '', title) 457 | prsd += f'{reftxt}\n{o}\n\n' 458 | return prsd 459 | 460 | elif "atishmkv" in link: 461 | prsd = "" 462 | links = [] 463 | res = requests.get(link) 464 | soup = BeautifulSoup(res.text, 'html.parser') 465 | x = soup.select('a[href^="https://gdflix.top/file"]') 466 | for a in x: 467 | links.append(a['href']) 468 | for o in links: 469 | prsd += o + '\n\n' 470 | return prsd 471 | 472 | elif "teluguflix" in link: 473 | gd_txt = "" 474 | r = requests.get(link) 475 | soup = BeautifulSoup (r.text, "html.parser") 476 | links = soup.select('a[href*="gdtot"]') 477 | gd_txt = f"Total Links Found : {len(links)}\n\n" 478 | for no, link in enumerate(links, start=1): 479 | gdlk = link['href'] 480 | t = requests.get(gdlk) 481 | soupt = BeautifulSoup(t.text, "html.parser") 482 | title = soupt.select('meta[property^="og:description"]') 483 | gd_txt += f"{no}. {(title[0]['content']).replace('Download ' , '')}\n{gdlk}\n\n" 484 | asleep(1.5) 485 | return gd_txt 486 | 487 | elif "taemovies" in link: 488 | gd_txt, no = "", 0 489 | r = requests.get(link) 490 | soup = BeautifulSoup (r.text, "html.parser") 491 | links = soup.select('a[href*="shortingly"]') 492 | gd_txt = f"Total Links Found : {len(links)}\n\n" 493 | for a in links: 494 | glink = rocklinks(a["href"]) 495 | t = requests.get(glink) 496 | soupt = BeautifulSoup(t.text, "html.parser") 497 | title = soupt.select('meta[property^="og:description"]') 498 | no += 1 499 | gd_txt += f"{no}. {(title[0]['content']).replace('Download ' , '')}\n{glink}\n\n" 500 | return gd_txt 501 | 502 | elif "toonworld4all" in link: 503 | gd_txt, no = "", 0 504 | r = requests.get(link) 505 | soup = BeautifulSoup(r.text, "html.parser") 506 | links = soup.select('a[href*="redirect/main.php?"]') 507 | for a in links: 508 | down = requests.get(a['href'], stream=True, allow_redirects=False) 509 | link = down.headers["location"] 510 | glink = rocklinks(link) 511 | if glink and "gdtot" in glink: 512 | t = requests.get(glink) 513 | soupt = BeautifulSoup(t.text, "html.parser") 514 | title = soupt.select('meta[property^="og:description"]') 515 | no += 1 516 | gd_txt += f"{no}. {(title[0]['content']).replace('Download ' , '')}\n{glink}\n\n" 517 | return gd_txt 518 | 519 | elif "animeremux" in link: 520 | gd_txt, no = "", 0 521 | r = requests.get(link) 522 | soup = BeautifulSoup (r.text, "html.parser") 523 | links = soup.select('a[href*="urlshortx.com"]') 524 | gd_txt = f"Total Links Found : {len(links)}\n\n" 525 | for a in links: 526 | link = a["href"] 527 | x = link.split("url=")[-1] 528 | t = requests.get(x) 529 | soupt = BeautifulSoup(t.text, "html.parser") 530 | title = soupt.title 531 | no += 1 532 | gd_txt += f"{no}. 
{title.text}\n{x}\n\n" 533 | asleep(1.5) 534 | return gd_txt 535 | 536 | else: 537 | res = requests.get(link) 538 | soup = BeautifulSoup(res.text, 'html.parser') 539 | mystx = soup.select(r'a[href^="magnet:?xt=urn:btih:"]') 540 | for hy in mystx: 541 | links.append(hy['href']) 542 | return links 543 | 544 | 545 | ################################################### 546 | # script links 547 | 548 | def getfinal(domain, url, sess): 549 | 550 | #sess = requests.session() 551 | res = sess.get(url) 552 | soup = BeautifulSoup(res.text,"html.parser") 553 | soup = soup.find("form").findAll("input") 554 | datalist = [] 555 | for ele in soup: 556 | datalist.append(ele.get("value")) 557 | 558 | data = { 559 | '_method': datalist[0], 560 | '_csrfToken': datalist[1], 561 | 'ad_form_data': datalist[2], 562 | '_Token[fields]': datalist[3], 563 | '_Token[unlocked]': datalist[4], 564 | } 565 | 566 | sess.headers = { 567 | 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0', 568 | 'Accept': 'application/json, text/javascript, */*; q=0.01', 569 | 'Accept-Language': 'en-US,en;q=0.5', 570 | 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', 571 | 'X-Requested-With': 'XMLHttpRequest', 572 | 'Origin': domain, 573 | 'Connection': 'keep-alive', 574 | 'Referer': url, 575 | 'Sec-Fetch-Dest': 'empty', 576 | 'Sec-Fetch-Mode': 'cors', 577 | 'Sec-Fetch-Site': 'same-origin', 578 | } 579 | 580 | # print("waiting 10 secs") 581 | time.sleep(10) # important 582 | response = sess.post(domain+'/links/go', data=data).json() 583 | furl = response["url"] 584 | return furl 585 | 586 | 587 | def getfirst(url): 588 | 589 | sess = requests.session() 590 | res = sess.get(url) 591 | 592 | soup = BeautifulSoup(res.text,"html.parser") 593 | soup = soup.find("form") 594 | action = soup.get("action") 595 | soup = soup.findAll("input") 596 | datalist = [] 597 | for ele in soup: 598 | datalist.append(ele.get("value")) 599 | sess.headers = { 600 | 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0', 601 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8', 602 | 'Accept-Language': 'en-US,en;q=0.5', 603 | 'Origin': action, 604 | 'Connection': 'keep-alive', 605 | 'Referer': action, 606 | 'Upgrade-Insecure-Requests': '1', 607 | 'Sec-Fetch-Dest': 'document', 608 | 'Sec-Fetch-Mode': 'navigate', 609 | 'Sec-Fetch-Site': 'same-origin', 610 | 'Sec-Fetch-User': '?1', 611 | } 612 | 613 | data = {'newwpsafelink': datalist[1], "g-recaptcha-response": RecaptchaV3()} 614 | response = sess.post(action, data=data) 615 | soup = BeautifulSoup(response.text, "html.parser") 616 | soup = soup.findAll("div", class_="wpsafe-bottom text-center") 617 | for ele in soup: 618 | rurl = ele.find("a").get("onclick")[13:-12] 619 | 620 | res = sess.get(rurl) 621 | furl = res.url 622 | # print(furl) 623 | return getfinal(f'https://{furl.split("/")[-2]}/',furl,sess) 624 | 625 | 626 | #################################################################################################### 627 | # ez4short 628 | 629 | def ez4(url): 630 | client = cloudscraper.create_scraper(allow_brotli=False) 631 | DOMAIN = "https://ez4short.com" 632 | ref = "https://techmody.io/" 633 | h = {"referer": ref} 634 | resp = client.get(url,headers=h) 635 | soup = BeautifulSoup(resp.content, "html.parser") 636 | inputs = soup.find_all("input") 637 | data = { input.get('name'): input.get('value') for input in inputs } 638 | h = { "x-requested-with": "XMLHttpRequest" } 639 | 
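# NOTE (added comment): the hidden-input form scraped above has to be POSTed
# back to {DOMAIN}/links/go as an AJAX request (hence the x-requested-with
# header below), and only after the shortener's countdown has elapsed;
# posting too early appears to return an HTML error page instead of the
# JSON {"url": ...} payload, which is why the fixed sleep is kept here.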
time.sleep(8) 640 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 641 | try: return r.json()['url'] 642 | except: return "Something went wrong :(" 643 | 644 | 645 | ################################################ 646 | # ola movies 647 | 648 | def olamovies(url): 649 | 650 | print("this takes time, you might want to take a break.") 651 | headers = { 652 | 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0', 653 | 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8', 654 | 'Accept-Language': 'en-US,en;q=0.5', 655 | 'Referer': url, 656 | 'Alt-Used': 'olamovies.ink', 657 | 'Connection': 'keep-alive', 658 | 'Upgrade-Insecure-Requests': '1', 659 | 'Sec-Fetch-Dest': 'document', 660 | 'Sec-Fetch-Mode': 'navigate', 661 | 'Sec-Fetch-Site': 'same-origin', 662 | 'Sec-Fetch-User': '?1', 663 | } 664 | 665 | client = cloudscraper.create_scraper(allow_brotli=False) 666 | res = client.get(url) 667 | soup = BeautifulSoup(res.text,"html.parser") 668 | soup = soup.findAll("div", class_="wp-block-button") 669 | 670 | outlist = [] 671 | for ele in soup: 672 | outlist.append(ele.find("a").get("href")) 673 | 674 | slist = [] 675 | for ele in outlist: 676 | try: 677 | key = ele.split("?key=")[1].split("&id=")[0].replace("%2B","+").replace("%3D","=").replace("%2F","/") 678 | id = ele.split("&id=")[1] 679 | except: 680 | continue 681 | 682 | count = 3 683 | params = { 'key': key, 'id': id} 684 | soup = "None" 685 | 686 | while 'rocklinks.net' not in soup and "try2link.com" not in soup and "ez4short.com" not in soup: 687 | res = client.get("https://olamovies.ink/download/", params=params, headers=headers) 688 | soup = BeautifulSoup(res.text,"html.parser") 689 | soup = soup.findAll("a")[0].get("href") 690 | if soup != "": 691 | if "try2link.com" in soup or 'rocklinks.net' in soup or "ez4short.com" in soup: slist.append(soup) 692 | else: pass 693 | else: 694 | if count == 0: break 695 | else: count -= 1 696 | 697 | time.sleep(10) 698 | 699 | final = [] 700 | for ele in slist: 701 | if "rocklinks.net" in ele: 702 | final.append(rocklinks(ele)) 703 | elif "try2link.com" in ele: 704 | final.append(try2link_bypass(ele)) 705 | elif "ez4short.com" in ele: 706 | final.append(ez4(ele)) 707 | else: 708 | pass 709 | 710 | links = "" 711 | for ele in final: 712 | links = links + ele + "\n" 713 | return links[:-1] 714 | 715 | 716 | ############################################### 717 | # katdrive 718 | 719 | def parse_info_katdrive(res): 720 | info_parsed = {} 721 | title = re.findall('>(.*?)<\/h4>', res.text)[0] 722 | info_chunks = re.findall('>(.*?)<\/td>', res.text) 723 | info_parsed['title'] = title 724 | for i in range(0, len(info_chunks), 2): 725 | info_parsed[info_chunks[i]] = info_chunks[i+1] 726 | return info_parsed 727 | 728 | def katdrive_dl(url,katcrypt): 729 | client = requests.Session() 730 | client.cookies.update({'crypt': katcrypt}) 731 | 732 | res = client.get(url) 733 | info_parsed = parse_info_katdrive(res) 734 | info_parsed['error'] = False 735 | 736 | up = urlparse(url) 737 | req_url = f"{up.scheme}://{up.netloc}/ajax.php?ajax=download" 738 | 739 | file_id = url.split('/')[-1] 740 | data = { 'id': file_id } 741 | headers = {'x-requested-with': 'XMLHttpRequest'} 742 | 743 | try: 744 | res = client.post(req_url, headers=headers, data=data).json()['file'] 745 | except: 746 | return "Error"#{'error': True, 'src_url': url} 747 | 748 | gd_id = re.findall('gd=(.*)', res, re.DOTALL)[0] 749 | info_parsed['gdrive_url'] = 
f"https://drive.google.com/open?id={gd_id}" 750 | info_parsed['src_url'] = url 751 | return info_parsed['gdrive_url'] 752 | 753 | 754 | ############################################### 755 | # hubdrive 756 | 757 | def parse_info_hubdrive(res): 758 | info_parsed = {} 759 | title = re.findall('>(.*?)<\/h4>', res.text)[0] 760 | info_chunks = re.findall('>(.*?)<\/td>', res.text) 761 | info_parsed['title'] = title 762 | for i in range(0, len(info_chunks), 2): 763 | info_parsed[info_chunks[i]] = info_chunks[i+1] 764 | return info_parsed 765 | 766 | def hubdrive_dl(url,hcrypt): 767 | client = requests.Session() 768 | client.cookies.update({'crypt': hcrypt}) 769 | 770 | res = client.get(url) 771 | info_parsed = parse_info_hubdrive(res) 772 | info_parsed['error'] = False 773 | 774 | up = urlparse(url) 775 | req_url = f"{up.scheme}://{up.netloc}/ajax.php?ajax=download" 776 | 777 | file_id = url.split('/')[-1] 778 | data = { 'id': file_id } 779 | headers = {'x-requested-with': 'XMLHttpRequest'} 780 | 781 | try: 782 | res = client.post(req_url, headers=headers, data=data).json()['file'] 783 | except: 784 | return "Error"#{'error': True, 'src_url': url} 785 | 786 | gd_id = re.findall('gd=(.*)', res, re.DOTALL)[0] 787 | info_parsed['gdrive_url'] = f"https://drive.google.com/open?id={gd_id}" 788 | info_parsed['src_url'] = url 789 | return info_parsed['gdrive_url'] 790 | 791 | 792 | ################################################# 793 | # drivefire 794 | 795 | def parse_info_drivefire(res): 796 | info_parsed = {} 797 | title = re.findall('>(.*?)<\/h4>', res.text)[0] 798 | info_chunks = re.findall('>(.*?)<\/td>', res.text) 799 | info_parsed['title'] = title 800 | for i in range(0, len(info_chunks), 2): 801 | info_parsed[info_chunks[i]] = info_chunks[i+1] 802 | return info_parsed 803 | 804 | def drivefire_dl(url,dcrypt): 805 | client = requests.Session() 806 | client.cookies.update({'crypt': dcrypt}) 807 | 808 | res = client.get(url) 809 | info_parsed = parse_info_drivefire(res) 810 | info_parsed['error'] = False 811 | 812 | up = urlparse(url) 813 | req_url = f"{up.scheme}://{up.netloc}/ajax.php?ajax=download" 814 | 815 | file_id = url.split('/')[-1] 816 | data = { 'id': file_id } 817 | headers = {'x-requested-with': 'XMLHttpRequest'} 818 | 819 | try: 820 | res = client.post(req_url, headers=headers, data=data).json()['file'] 821 | except: 822 | return "Error"#{'error': True, 'src_url': url} 823 | 824 | decoded_id = res.rsplit('/', 1)[-1] 825 | info_parsed = f"https://drive.google.com/file/d/{decoded_id}" 826 | return info_parsed 827 | 828 | 829 | ################################################## 830 | # kolop 831 | 832 | def parse_info_kolop(res): 833 | info_parsed = {} 834 | title = re.findall('>(.*?)<\/h4>', res.text)[0] 835 | info_chunks = re.findall('>(.*?)<\/td>', res.text) 836 | info_parsed['title'] = title 837 | for i in range(0, len(info_chunks), 2): 838 | info_parsed[info_chunks[i]] = info_chunks[i+1] 839 | return info_parsed 840 | 841 | def kolop_dl(url,kcrypt): 842 | client = requests.Session() 843 | client.cookies.update({'crypt': kcrypt}) 844 | 845 | res = client.get(url) 846 | info_parsed = parse_info_kolop(res) 847 | info_parsed['error'] = False 848 | 849 | up = urlparse(url) 850 | req_url = f"{up.scheme}://{up.netloc}/ajax.php?ajax=download" 851 | 852 | file_id = url.split('/')[-1] 853 | data = { 'id': file_id } 854 | headers = { 'x-requested-with': 'XMLHttpRequest'} 855 | 856 | try: 857 | res = client.post(req_url, headers=headers, data=data).json()['file'] 858 | except: 859 | return 
"Error"#{'error': True, 'src_url': url} 860 | 861 | gd_id = re.findall('gd=(.*)', res, re.DOTALL)[0] 862 | info_parsed['gdrive_url'] = f"https://drive.google.com/open?id={gd_id}" 863 | info_parsed['src_url'] = url 864 | 865 | return info_parsed['gdrive_url'] 866 | 867 | 868 | ################################################## 869 | # mediafire 870 | 871 | def mediafire(url): 872 | 873 | res = requests.get(url, stream=True) 874 | contents = res.text 875 | 876 | for line in contents.splitlines(): 877 | m = re.search(r'href="((http|https)://download[^"]+)', line) 878 | if m: 879 | return m.groups()[0] 880 | 881 | 882 | #################################################### 883 | # zippyshare 884 | 885 | def zippyshare(url): 886 | resp = requests.get(url).text 887 | surl = resp.split("document.getElementById('dlbutton').href = ")[1].split(";")[0] 888 | parts = surl.split("(")[1].split(")")[0].split(" ") 889 | val = str(int(parts[0]) % int(parts[2]) + int(parts[4]) % int(parts[6])) 890 | surl = surl.split('"') 891 | burl = url.split("zippyshare.com")[0] 892 | furl = burl + "zippyshare.com" + surl[1] + val + surl[-2] 893 | return furl 894 | 895 | 896 | #################################################### 897 | # filercrypt 898 | 899 | def getlinks(dlc): 900 | headers = { 901 | 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:103.0) Gecko/20100101 Firefox/103.0', 902 | 'Accept': 'application/json, text/javascript, */*', 903 | 'Accept-Language': 'en-US,en;q=0.5', 904 | 'X-Requested-With': 'XMLHttpRequest', 905 | 'Origin': 'http://dcrypt.it', 906 | 'Connection': 'keep-alive', 907 | 'Referer': 'http://dcrypt.it/', 908 | } 909 | 910 | data = { 911 | 'content': dlc, 912 | } 913 | 914 | response = requests.post('http://dcrypt.it/decrypt/paste', headers=headers, data=data).json()["success"]["links"] 915 | links = "" 916 | for link in response: 917 | links = links + link + "\n\n" 918 | return links[:-1] 919 | 920 | 921 | def filecrypt(url): 922 | 923 | client = cloudscraper.create_scraper(allow_brotli=False) 924 | headers = { 925 | "authority": "filecrypt.co", 926 | "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", 927 | "accept-language": "en-US,en;q=0.9", 928 | "cache-control": "max-age=0", 929 | "content-type": "application/x-www-form-urlencoded", 930 | "dnt": "1", 931 | "origin": "https://filecrypt.co", 932 | "referer": url, 933 | "sec-ch-ua": '"Google Chrome";v="105", "Not)A;Brand";v="8", "Chromium";v="105"', 934 | "sec-ch-ua-mobile": "?0", 935 | "sec-ch-ua-platform": "Windows", 936 | "sec-fetch-dest": "document", 937 | "sec-fetch-mode": "navigate", 938 | "sec-fetch-site": "same-origin", 939 | "sec-fetch-user": "?1", 940 | "upgrade-insecure-requests": "1", 941 | "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36" 942 | } 943 | 944 | 945 | resp = client.get(url, headers=headers) 946 | soup = BeautifulSoup(resp.content, "html.parser") 947 | 948 | buttons = soup.find_all("button") 949 | for ele in buttons: 950 | line = ele.get("onclick") 951 | if line !=None and "DownloadDLC" in line: 952 | dlclink = "https://filecrypt.co/DLC/" + line.split("DownloadDLC('")[1].split("'")[0] + ".html" 953 | break 954 | 955 | resp = client.get(dlclink,headers=headers) 956 | return getlinks(resp.text,client) 957 | 958 | 959 | ##################################################### 960 | # dropbox 961 | 962 | def dropbox(url): 963 | return 
url.replace("www.","").replace("dropbox.com","dl.dropboxusercontent.com").replace("?dl=0","") 964 | 965 | 966 | ###################################################### 967 | # shareus 968 | 969 | def shareus(url): 970 | token = url.split("=")[-1] 971 | bypassed_url = "https://us-central1-my-apps-server.cloudfunctions.net/r?shortid="+ token 972 | response = requests.get(bypassed_url).text 973 | return response 974 | 975 | 976 | ####################################################### 977 | # shortingly 978 | 979 | def shortingly(url): 980 | client = cloudscraper.create_scraper(allow_brotli=False) 981 | DOMAIN = "https://shortingly.in" 982 | url = url[:-1] if url[-1] == '/' else url 983 | code = url.split("/")[-1] 984 | final_url = f"{DOMAIN}/{code}" 985 | ref = "https://tech.gyanitheme.com/" 986 | h = {"referer": ref} 987 | resp = client.get(final_url,headers=h) 988 | soup = BeautifulSoup(resp.content, "html.parser") 989 | inputs = soup.find_all("input") 990 | data = { input.get('name'): input.get('value') for input in inputs } 991 | h = { "x-requested-with": "XMLHttpRequest" } 992 | time.sleep(5) 993 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 994 | try: return r.json()['url'] 995 | except: return "Something went wrong :(" 996 | 997 | ####################################################### 998 | # Gyanilinks - gtlinks.me 999 | 1000 | def gyanilinks(url): 1001 | DOMAIN = "https://go.theforyou.in/" 1002 | client = cloudscraper.create_scraper(allow_brotli=False) 1003 | url = url[:-1] if url[-1] == '/' else url 1004 | code = url.split("/")[-1] 1005 | final_url = f"{DOMAIN}/{code}" 1006 | resp = client.get(final_url) 1007 | soup = BeautifulSoup(resp.content, "html.parser") 1008 | try: inputs = soup.find(id="go-link").find_all(name="input") 1009 | except: return "Incorrect Link" 1010 | data = { input.get('name'): input.get('value') for input in inputs } 1011 | h = { "x-requested-with": "XMLHttpRequest" } 1012 | time.sleep(5) 1013 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1014 | try: return r.json()['url'] 1015 | except: return "Something went wrong :(" 1016 | 1017 | 1018 | ####################################################### 1019 | # Flashlink 1020 | 1021 | def flashl(url): 1022 | client = cloudscraper.create_scraper(allow_brotli=False) 1023 | DOMAIN = "https://files.earnash.com/" 1024 | url = url[:-1] if url[-1] == '/' else url 1025 | code = url.split("/")[-1] 1026 | final_url = f"{DOMAIN}/{code}" 1027 | ref = "https://flash1.cordtpoint.co.in" 1028 | h = {"referer": ref} 1029 | resp = client.get(final_url,headers=h) 1030 | soup = BeautifulSoup(resp.content, "html.parser") 1031 | inputs = soup.find_all("input") 1032 | data = { input.get('name'): input.get('value') for input in inputs } 1033 | h = { "x-requested-with": "XMLHttpRequest" } 1034 | time.sleep(15) 1035 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1036 | try: return r.json()['url'] 1037 | except: return "Something went wrong :(" 1038 | 1039 | 1040 | ####################################################### 1041 | # short2url 1042 | 1043 | def short2url(url): 1044 | client = cloudscraper.create_scraper(allow_brotli=False) 1045 | DOMAIN = "https://techyuth.xyz/blog" 1046 | url = url[:-1] if url[-1] == '/' else url 1047 | code = url.split("/")[-1] 1048 | final_url = f"{DOMAIN}/{code}" 1049 | ref = "https://blog.coin2pay.xyz/" 1050 | h = {"referer": ref} 1051 | resp = client.get(final_url, headers=h) 1052 | soup = BeautifulSoup(resp.content, "html.parser") 1053 | inputs = 
soup.find_all("input") 1054 | data = { input.get('name'): input.get('value') for input in inputs } 1055 | h = { "x-requested-with": "XMLHttpRequest" } 1056 | time.sleep(10) 1057 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1058 | try: 1059 | return r.json()['url'] 1060 | except: return "Something went wrong :(" 1061 | 1062 | 1063 | ####################################################### 1064 | # anonfiles 1065 | 1066 | def anonfile(url): 1067 | 1068 | headersList = { "Accept": "*/*"} 1069 | payload = "" 1070 | 1071 | response = requests.request("GET", url, data=payload, headers=headersList).text.split("\n") 1072 | for ele in response: 1073 | if "https://cdn" in ele and "anonfiles.com" in ele and url.split("/")[-2] in ele: 1074 | break 1075 | 1076 | return ele.split('href="')[1].split('"')[0] 1077 | 1078 | 1079 | ########################################################## 1080 | # pixl 1081 | 1082 | def pixl(url): 1083 | count = 1 1084 | dl_msg = "" 1085 | currentpage = 1 1086 | settotalimgs = True 1087 | totalimages = "" 1088 | client = cloudscraper.create_scraper(allow_brotli=False) 1089 | resp = client.get(url) 1090 | if resp.status_code == 404: 1091 | return "File not found/The link you entered is wrong!" 1092 | soup = BeautifulSoup(resp.content, "html.parser") 1093 | if "album" in url and settotalimgs: 1094 | totalimages = soup.find("span", {"data-text": "image-count"}).text 1095 | settotalimgs = False 1096 | thmbnailanch = soup.findAll(attrs={"class": "--media"}) 1097 | links = soup.findAll(attrs={"data-pagination": "next"}) 1098 | try: 1099 | url = links[0].attrs["href"] 1100 | except BaseException: 1101 | url = None 1102 | for ref in thmbnailanch: 1103 | imgdata = client.get(ref.attrs["href"]) 1104 | if not imgdata.status_code == 200: 1105 | time.sleep(5) 1106 | continue 1107 | imghtml = BeautifulSoup(imgdata.text, "html.parser") 1108 | downloadanch = imghtml.find(attrs={"class": "btn-download"}) 1109 | currentimg = downloadanch.attrs["href"] 1110 | currentimg = currentimg.replace(" ", "%20") 1111 | dl_msg += f"{count}. 
{currentimg}\n" 1112 | count += 1 1113 | currentpage += 1 1114 | fld_msg = f"Your provided Pixl.is link is of Folder and I've Found {count - 1} files in the folder.\n" 1115 | fld_link = f"\nFolder Link: {url}\n" 1116 | final_msg = fld_link + "\n" + fld_msg + "\n" + dl_msg 1117 | return final_msg 1118 | 1119 | 1120 | ############################################################ 1121 | # sirigan ( unused ) 1122 | 1123 | def siriganbypass(url): 1124 | client = requests.Session() 1125 | res = client.get(url) 1126 | url = res.url.split('=', maxsplit=1)[-1] 1127 | 1128 | while True: 1129 | try: url = base64.b64decode(url).decode('utf-8') 1130 | except: break 1131 | 1132 | return url.split('url=')[-1] 1133 | 1134 | 1135 | ############################################################ 1136 | # shorte 1137 | 1138 | def sh_st_bypass(url): 1139 | client = requests.Session() 1140 | client.headers.update({'referer': url}) 1141 | p = urlparse(url) 1142 | 1143 | res = client.get(url) 1144 | 1145 | sess_id = re.findall('''sessionId(?:\s+)?:(?:\s+)?['|"](.*?)['|"]''', res.text)[0] 1146 | 1147 | final_url = f"{p.scheme}://{p.netloc}/shortest-url/end-adsession" 1148 | params = { 1149 | 'adSessionId': sess_id, 1150 | 'callback': '_' 1151 | } 1152 | time.sleep(5) # !important 1153 | 1154 | res = client.get(final_url, params=params) 1155 | dest_url = re.findall('"(.*?)"', res.text)[1].replace('\/','/') 1156 | 1157 | return { 1158 | 'src': url, 1159 | 'dst': dest_url 1160 | }['dst'] 1161 | 1162 | 1163 | ############################################################# 1164 | # gofile 1165 | 1166 | def gofile_dl(url,password=""): 1167 | api_uri = 'https://api.gofile.io' 1168 | client = requests.Session() 1169 | res = client.get(api_uri+'/createAccount').json() 1170 | 1171 | data = { 1172 | 'contentId': url.split('/')[-1], 1173 | 'token': res['data']['token'], 1174 | 'websiteToken': '12345', 1175 | 'cache': 'true', 1176 | 'password': hashlib.sha256(password.encode('utf-8')).hexdigest() 1177 | } 1178 | res = client.get(api_uri+'/getContent', params=data).json() 1179 | 1180 | content = [] 1181 | for item in res['data']['contents'].values(): 1182 | content.append(item) 1183 | 1184 | return { 1185 | 'accountToken': data['token'], 1186 | 'files': content 1187 | }["files"][0]["link"] 1188 | 1189 | 1190 | ################################################################ 1191 | # sharer pw 1192 | 1193 | def parse_info_sharer(res): 1194 | f = re.findall(">(.*?)<\/td>", res.text) 1195 | info_parsed = {} 1196 | for i in range(0, len(f), 3): 1197 | info_parsed[f[i].lower().replace(' ', '_')] = f[i+2] 1198 | return info_parsed 1199 | 1200 | def sharer_pw(url,Laravel_Session, XSRF_TOKEN, forced_login=False): 1201 | client = cloudscraper.create_scraper(allow_brotli=False) 1202 | client.cookies.update({ 1203 | "XSRF-TOKEN": XSRF_TOKEN, 1204 | "laravel_session": Laravel_Session 1205 | }) 1206 | res = client.get(url) 1207 | token = re.findall("_token\s=\s'(.*?)'", res.text, re.DOTALL)[0] 1208 | ddl_btn = etree.HTML(res.content).xpath("//button[@id='btndirect']") 1209 | info_parsed = parse_info_sharer(res) 1210 | info_parsed['error'] = True 1211 | info_parsed['src_url'] = url 1212 | info_parsed['link_type'] = 'login' 1213 | info_parsed['forced_login'] = forced_login 1214 | headers = { 1215 | 'content-type': 'application/x-www-form-urlencoded; charset=UTF-8', 1216 | 'x-requested-with': 'XMLHttpRequest' 1217 | } 1218 | data = { 1219 | '_token': token 1220 | } 1221 | if len(ddl_btn): 1222 | info_parsed['link_type'] = 'direct' 1223 | if not 
forced_login: 1224 | data['nl'] = 1 1225 | try: 1226 | res = client.post(url+'/dl', headers=headers, data=data).json() 1227 | except: 1228 | return info_parsed 1229 | if 'url' in res and res['url']: 1230 | info_parsed['error'] = False 1231 | info_parsed['gdrive_link'] = res['url'] 1232 | if len(ddl_btn) and not forced_login and not 'url' in info_parsed: 1233 | # retry download via login 1234 | return sharer_pw(url,Laravel_Session, XSRF_TOKEN, forced_login=True) 1235 | return info_parsed["gdrive_link"] 1236 | 1237 | 1238 | ################################################################# 1239 | # gdtot 1240 | 1241 | def gdtot(url): 1242 | cget = create_scraper().request 1243 | try: 1244 | res = cget('GET', f'https://gdbot.xyz/file/{url.split("/")[-1]}') 1245 | except Exception as e: 1246 | return (f'ERROR: {e.__class__.__name__}') 1247 | token_url = etree.HTML(res.content).xpath( 1248 | "//a[contains(@class,'inline-flex items-center justify-center')]/@href") 1249 | if not token_url: 1250 | try: 1251 | url = cget('GET', url).url 1252 | p_url = urlparse(url) 1253 | res = cget( 1254 | "GET", f"{p_url.scheme}://{p_url.hostname}/ddl/{url.split('/')[-1]}") 1255 | except Exception as e: 1256 | return (f'ERROR: {e.__class__.__name__}') 1257 | if (drive_link := re.findall(r"myDl\('(.*?)'\)", res.text)) and "drive.google.com" in drive_link[0]: 1258 | return drive_link[0] 1259 | else: 1260 | return ( 1261 | 'ERROR: Drive Link not found, Try in your broswer') 1262 | token_url = token_url[0] 1263 | try: 1264 | token_page = cget('GET', token_url) 1265 | except Exception as e: 1266 | return ( 1267 | f'ERROR: {e.__class__.__name__} with {token_url}') 1268 | path = re.findall('\("(.*?)"\)', token_page.text) 1269 | if not path: 1270 | return ('ERROR: Cannot bypass this') 1271 | path = path[0] 1272 | raw = urlparse(token_url) 1273 | final_url = f'{raw.scheme}://{raw.hostname}{path}' 1274 | return ddl.sharer_scraper(final_url) 1275 | 1276 | 1277 | ################################################################## 1278 | # adfly 1279 | 1280 | def decrypt_url(code): 1281 | a, b = '', '' 1282 | for i in range(0, len(code)): 1283 | if i % 2 == 0: a += code[i] 1284 | else: b = code[i] + b 1285 | key = list(a + b) 1286 | i = 0 1287 | while i < len(key): 1288 | if key[i].isdigit(): 1289 | for j in range(i+1,len(key)): 1290 | if key[j].isdigit(): 1291 | u = int(key[i]) ^ int(key[j]) 1292 | if u < 10: key[i] = str(u) 1293 | i = j 1294 | break 1295 | i+=1 1296 | key = ''.join(key) 1297 | decrypted = base64.b64decode(key)[16:-16] 1298 | return decrypted.decode('utf-8') 1299 | 1300 | 1301 | def adfly(url): 1302 | client = cloudscraper.create_scraper(allow_brotli=False) 1303 | res = client.get(url).text 1304 | out = {'error': False, 'src_url': url} 1305 | try: 1306 | ysmm = re.findall("ysmm\s+=\s+['|\"](.*?)['|\"]", res)[0] 1307 | except: 1308 | out['error'] = True 1309 | return out 1310 | url = decrypt_url(ysmm) 1311 | if re.search(r'go\.php\?u\=', url): 1312 | url = base64.b64decode(re.sub(r'(.*?)u=', '', url)).decode() 1313 | elif '&dest=' in url: 1314 | url = unquote(re.sub(r'(.*?)dest=', '', url)) 1315 | out['bypassed_url'] = url 1316 | return out 1317 | 1318 | 1319 | ############################################################################################## 1320 | # gplinks 1321 | 1322 | def gplinks(url: str): 1323 | client = cloudscraper.create_scraper(allow_brotli=False) 1324 | token = url.split("/")[-1] 1325 | domain ="https://gplinks.co/" 1326 | referer = "https://mynewsmedia.co/" 1327 | vid = client.get(url, 
allow_redirects= False).headers["Location"].split("=")[-1] 1328 | url = f"{url}/?{vid}" 1329 | response = client.get(url, allow_redirects=False) 1330 | soup = BeautifulSoup(response.content, "html.parser") 1331 | inputs = soup.find(id="go-link").find_all(name="input") 1332 | data = { input.get('name'): input.get('value') for input in inputs } 1333 | time.sleep(10) 1334 | headers={"x-requested-with": "XMLHttpRequest"} 1335 | bypassed_url = client.post(domain+"links/go", data=data, headers=headers).json()["url"] 1336 | try: return bypassed_url 1337 | except: return 'Something went wrong :(' 1338 | 1339 | 1340 | ###################################################################################################### 1341 | # droplink 1342 | 1343 | def droplink(url): 1344 | client = cloudscraper.create_scraper(allow_brotli=False) 1345 | res = client.get(url, timeout=5) 1346 | 1347 | ref = re.findall("action[ ]{0,}=[ ]{0,}['|\"](.*?)['|\"]", res.text)[0] 1348 | h = {"referer": ref} 1349 | res = client.get(url, headers=h) 1350 | 1351 | bs4 = BeautifulSoup(res.content, "html.parser") 1352 | inputs = bs4.find_all("input") 1353 | data = {input.get("name"): input.get("value") for input in inputs} 1354 | h = { 1355 | "content-type": "application/x-www-form-urlencoded", 1356 | "x-requested-with": "XMLHttpRequest", 1357 | } 1358 | 1359 | p = urlparse(url) 1360 | final_url = f"{p.scheme}://{p.netloc}/links/go" 1361 | time.sleep(3.1) 1362 | res = client.post(final_url, data=data, headers=h).json() 1363 | 1364 | if res["status"] == "success": return res["url"] 1365 | return 'Something went wrong :(' 1366 | 1367 | 1368 | ##################################################################################################################### 1369 | # link vertise 1370 | 1371 | def linkvertise(url): 1372 | params = {'url': url,} 1373 | response = requests.get('https://bypass.pm/bypass2', params=params).json() 1374 | if response["success"]: return response["destination"] 1375 | else: return response["msg"] 1376 | 1377 | 1378 | ################################################################################################################### 1379 | # others 1380 | 1381 | def others(url): 1382 | return "API Currently not Available" 1383 | 1384 | 1385 | ################################################################################################################# 1386 | # ouo 1387 | 1388 | # RECAPTCHA v3 BYPASS 1389 | # code from https://github.com/xcscxr/Recaptcha-v3-bypass 1390 | def RecaptchaV3(ANCHOR_URL="https://www.google.com/recaptcha/api2/anchor?ar=1&k=6Lcr1ncUAAAAAH3cghg6cOTPGARa8adOf-y9zv2x&co=aHR0cHM6Ly9vdW8uaW86NDQz&hl=en&v=1B_yv3CBEV10KtI2HJ6eEXhJ&size=invisible&cb=4xnsug1vufyr"): 1391 | url_base = 'https://www.google.com/recaptcha/' 1392 | post_data = "v={}&reason=q&c={}&k={}&co={}" 1393 | client = requests.Session() 1394 | client.headers.update({ 1395 | 'content-type': 'application/x-www-form-urlencoded' 1396 | }) 1397 | matches = re.findall('([api2|enterprise]+)\/anchor\?(.*)', ANCHOR_URL)[0] 1398 | url_base += matches[0]+'/' 1399 | params = matches[1] 1400 | res = client.get(url_base+'anchor', params=params) 1401 | token = re.findall(r'"recaptcha-token" value="(.*?)"', res.text)[0] 1402 | params = dict(pair.split('=') for pair in params.split('&')) 1403 | post_data = post_data.format(params["v"], token, params["k"], params["co"]) 1404 | res = client.post(url_base+'reload', params=f'k={params["k"]}', data=post_data) 1405 | answer = re.findall(r'"rresp","(.*?)"', res.text)[0] 1406 | return answer 1407 
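# RecaptchaV3() above mimics the invisible reCAPTCHA flow: it loads the
# anchor frame for the site key embedded in ANCHOR_URL, lifts the
# "recaptcha-token" value from that page, then replays it against the
# /reload endpoint and scrapes the "rresp" answer out of the response.
# ouo() below submits that answer as the form's x-token field; since the
# default ANCHOR_URL carries the ouo.io site key, the token is presumably
# only valid for that site.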
| 1408 | 1409 | # code from https://github.com/xcscxr/ouo-bypass/ 1410 | def ouo(url): 1411 | client = requests.Session() 1412 | tempurl = url.replace("ouo.press", "ouo.io") 1413 | p = urlparse(tempurl) 1414 | id = tempurl.split('/')[-1] 1415 | 1416 | res = client.get(tempurl) 1417 | next_url = f"{p.scheme}://{p.hostname}/go/{id}" 1418 | 1419 | for _ in range(2): 1420 | if res.headers.get('Location'): 1421 | break 1422 | bs4 = BeautifulSoup(res.content, 'lxml') 1423 | inputs = bs4.form.findAll("input", {"name": re.compile(r"token$")}) 1424 | data = { input.get('name'): input.get('value') for input in inputs } 1425 | 1426 | ans = RecaptchaV3() 1427 | data['x-token'] = ans 1428 | h = { 1429 | 'content-type': 'application/x-www-form-urlencoded' 1430 | } 1431 | res = client.post(next_url, data=data, headers=h, allow_redirects=False) 1432 | next_url = f"{p.scheme}://{p.hostname}/xreallcygo/{id}" 1433 | 1434 | return res.headers.get('Location') 1435 | 1436 | 1437 | #################################################################################################################### 1438 | # mdisk 1439 | 1440 | def mdisk(url): 1441 | header = { 1442 | 'Accept': '*/*', 1443 | 'Accept-Language': 'en-US,en;q=0.5', 1444 | 'Accept-Encoding': 'gzip, deflate, br', 1445 | 'Referer': 'https://mdisk.me/', 1446 | 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36' 1447 | } 1448 | 1449 | inp = url 1450 | fxl = inp.split("/") 1451 | cid = fxl[-1] 1452 | 1453 | URL = f'https://diskuploader.entertainvideo.com/v1/file/cdnurl?param={cid}' 1454 | res = requests.get(url=URL, headers=header).json() 1455 | return res['download'] + '\n\n' + res['source'] 1456 | 1457 | 1458 | ################################################################################################################## 1459 | # AppDrive or DriveApp etc. 
Look-Alike Link and as well as the Account Details (Required for Login Required Links only) 1460 | 1461 | def unified(url): 1462 | 1463 | if ddl.is_share_link(url): 1464 | if 'https://gdtot' in url: return ddl.gdtot(url) 1465 | else: return ddl.sharer_scraper(url) 1466 | 1467 | try: 1468 | Email = "chzeesha4@gmail.com" 1469 | Password = "zeeshi#789" 1470 | 1471 | account = {"email": Email, "passwd": Password} 1472 | client = cloudscraper.create_scraper(allow_brotli=False) 1473 | client.headers.update( 1474 | { 1475 | "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36" 1476 | } 1477 | ) 1478 | data = {"email": account["email"], "password": account["passwd"]} 1479 | client.post(f"https://{urlparse(url).netloc}/login", data=data) 1480 | res = client.get(url) 1481 | key = re.findall('"key",\s+"(.*?)"', res.text)[0] 1482 | ddl_btn = etree.HTML(res.content).xpath("//button[@id='drc']") 1483 | info = re.findall(">(.*?)<\/li>", res.text) 1484 | info_parsed = {} 1485 | for item in info: 1486 | kv = [s.strip() for s in item.split(": ", maxsplit=1)] 1487 | info_parsed[kv[0].lower()] = kv[1] 1488 | info_parsed = info_parsed 1489 | info_parsed["error"] = False 1490 | info_parsed["link_type"] = "login" 1491 | headers = { 1492 | "Content-Type": f"multipart/form-data; boundary={'-'*4}_", 1493 | } 1494 | data = {"type": 1, "key": key, "action": "original"} 1495 | if len(ddl_btn): 1496 | info_parsed["link_type"] = "direct" 1497 | data["action"] = "direct" 1498 | while data["type"] <= 3: 1499 | boundary = f'{"-"*6}_' 1500 | data_string = "" 1501 | for item in data: 1502 | data_string += f"{boundary}\r\n" 1503 | data_string += f'Content-Disposition: form-data; name="{item}"\r\n\r\n{data[item]}\r\n' 1504 | data_string += f"{boundary}--\r\n" 1505 | gen_payload = data_string 1506 | try: 1507 | response = client.post(url, data=gen_payload, headers=headers).json() 1508 | break 1509 | except BaseException: 1510 | data["type"] += 1 1511 | if "url" in response: 1512 | info_parsed["gdrive_link"] = response["url"] 1513 | elif "error" in response and response["error"]: 1514 | info_parsed["error"] = True 1515 | info_parsed["error_message"] = response["message"] 1516 | else: 1517 | info_parsed["error"] = True 1518 | info_parsed["error_message"] = "Something went wrong :(" 1519 | if info_parsed["error"]: 1520 | return info_parsed 1521 | if "driveapp" in urlparse(url).netloc and not info_parsed["error"]: 1522 | res = client.get(info_parsed["gdrive_link"]) 1523 | drive_link = etree.HTML(res.content).xpath( 1524 | "//a[contains(@class,'btn')]/@href" 1525 | )[0] 1526 | info_parsed["gdrive_link"] = drive_link 1527 | info_parsed["src_url"] = url 1528 | if "drivehub" in urlparse(url).netloc and not info_parsed["error"]: 1529 | res = client.get(info_parsed["gdrive_link"]) 1530 | drive_link = etree.HTML(res.content).xpath( 1531 | "//a[contains(@class,'btn')]/@href" 1532 | )[0] 1533 | info_parsed["gdrive_link"] = drive_link 1534 | if "gdflix" in urlparse(url).netloc and not info_parsed["error"]: 1535 | res = client.get(info_parsed["gdrive_link"]) 1536 | drive_link = etree.HTML(res.content).xpath( 1537 | "//a[contains(@class,'btn')]/@href" 1538 | )[0] 1539 | info_parsed["gdrive_link"] = drive_link 1540 | 1541 | if "drivesharer" in urlparse(url).netloc and not info_parsed["error"]: 1542 | res = client.get(info_parsed["gdrive_link"]) 1543 | drive_link = etree.HTML(res.content).xpath( 1544 | "//a[contains(@class,'btn')]/@href" 1545 | )[0] 1546 | 
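# Same pattern as the driveapp/drivehub/gdflix branches above and the
# drivebit/drivelinks/driveace/drivepro branches below: the intermediate
# page is fetched once more and the first anchor carrying a "btn" class is
# taken as the final Google Drive link. Only the matched domain differs,
# so these blocks could plausibly be folded into a single loop.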
info_parsed["gdrive_link"] = drive_link 1547 | if "drivebit" in urlparse(url).netloc and not info_parsed["error"]: 1548 | res = client.get(info_parsed["gdrive_link"]) 1549 | drive_link = etree.HTML(res.content).xpath( 1550 | "//a[contains(@class,'btn')]/@href" 1551 | )[0] 1552 | info_parsed["gdrive_link"] = drive_link 1553 | if "drivelinks" in urlparse(url).netloc and not info_parsed["error"]: 1554 | res = client.get(info_parsed["gdrive_link"]) 1555 | drive_link = etree.HTML(res.content).xpath( 1556 | "//a[contains(@class,'btn')]/@href" 1557 | )[0] 1558 | info_parsed["gdrive_link"] = drive_link 1559 | if "driveace" in urlparse(url).netloc and not info_parsed["error"]: 1560 | res = client.get(info_parsed["gdrive_link"]) 1561 | drive_link = etree.HTML(res.content).xpath( 1562 | "//a[contains(@class,'btn')]/@href" 1563 | )[0] 1564 | info_parsed["gdrive_link"] = drive_link 1565 | if "drivepro" in urlparse(url).netloc and not info_parsed["error"]: 1566 | res = client.get(info_parsed["gdrive_link"]) 1567 | drive_link = etree.HTML(res.content).xpath( 1568 | "//a[contains(@class,'btn')]/@href" 1569 | )[0] 1570 | info_parsed["gdrive_link"] = drive_link 1571 | if info_parsed["error"]: 1572 | return "Faced an Unknown Error!" 1573 | return info_parsed["gdrive_link"] 1574 | except BaseException: 1575 | return "Unable to Extract GDrive Link" 1576 | 1577 | 1578 | ##################################################################################################### 1579 | # urls open 1580 | 1581 | def urlsopen(url): 1582 | client = cloudscraper.create_scraper(allow_brotli=False) 1583 | DOMAIN = "https://blogpost.viewboonposts.com/e998933f1f665f5e75f2d1ae0009e0063ed66f889000" 1584 | url = url[:-1] if url[-1] == '/' else url 1585 | code = url.split("/")[-1] 1586 | final_url = f"{DOMAIN}/{code}" 1587 | ref = "https://blog.textpage.xyz/" 1588 | h = {"referer": ref} 1589 | resp = client.get(final_url,headers=h) 1590 | soup = BeautifulSoup(resp.content, "html.parser") 1591 | inputs = soup.find_all("input") 1592 | data = { input.get('name'): input.get('value') for input in inputs } 1593 | h = { "x-requested-with": "XMLHttpRequest" } 1594 | time.sleep(2) 1595 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1596 | try: return r.json()['url'] 1597 | except: return "Something went wrong :(" 1598 | 1599 | 1600 | #################################################################################################### 1601 | # URLShortX - xpshort 1602 | 1603 | def xpshort(url): 1604 | client = cloudscraper.create_scraper(allow_brotli=False) 1605 | DOMAIN = "https://xpshort.com" 1606 | url = url[:-1] if url[-1] == '/' else url 1607 | code = url.split("/")[-1] 1608 | final_url = f"{DOMAIN}/{code}" 1609 | ref = "https://m.awmnews.in/" 1610 | h = {"referer": ref} 1611 | resp = client.get(final_url,headers=h) 1612 | soup = BeautifulSoup(resp.content, "html.parser") 1613 | inputs = soup.find_all("input") 1614 | data = { input.get('name'): input.get('value') for input in inputs } 1615 | h = { "x-requested-with": "XMLHttpRequest" } 1616 | time.sleep(8) 1617 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1618 | try: return r.json()['url'] 1619 | except: return "Something went wrong :(" 1620 | 1621 | 1622 | #################################################################################################### 1623 | # Vnshortner- 1624 | 1625 | def vnshortener(url): 1626 | client = cloudscraper.create_scraper(allow_brotli=False) 1627 | DOMAIN = "https://vnshortener.com/" 1628 | url = url[:-1] if url[-1] == '/' 
else url 1629 | code = url.split("/")[-1] 1630 | final_url = f"{DOMAIN}/{code}" 1631 | ref = "https://nishankhatri.com.np/" 1632 | h = {"referer": ref} 1633 | resp = client.get(final_url,headers=h) 1634 | soup = BeautifulSoup(resp.content, "html.parser") 1635 | inputs = soup.find_all("input") 1636 | data = { input.get('name'): input.get('value') for input in inputs } 1637 | h = { "x-requested-with": "XMLHttpRequest" } 1638 | time.sleep(8) 1639 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1640 | try: return r.json()['url'] 1641 | except: return "Something went wrong :(" 1642 | 1643 | 1644 | ##################################################################################################### 1645 | # onepagelink 1646 | 1647 | def onepagelink(url): 1648 | client = cloudscraper.create_scraper(allow_brotli=False) 1649 | DOMAIN = "go.onepagelink.in" 1650 | url = url[:-1] if url[-1] == "/" else url 1651 | code = url.split("/")[-1] 1652 | final_url = f"https://{DOMAIN}/{code}" 1653 | ref = "gorating.in" 1654 | h = {"referer": ref} 1655 | response = client.get(final_url, headers=h) 1656 | soup = BeautifulSoup(response.text, "html.parser") 1657 | inputs = soup.find_all("input") 1658 | data = {input.get("name"): input.get("value") for input in inputs} 1659 | h = {"x-requested-with": "XMLHttpRequest"} 1660 | time.sleep(9) 1661 | r = client.post(f"https://{DOMAIN}/links/go", data=data, headers=h) 1662 | try: 1663 | return r.json()["url"] 1664 | except BaseException: 1665 | return "Something went wrong :(" 1666 | 1667 | 1668 | ##################################################################################################### 1669 | # dulink 1670 | 1671 | def dulink(url): 1672 | client = cloudscraper.create_scraper(allow_brotli=False) 1673 | DOMAIN = "https://du-link.in" 1674 | url = url[:-1] if url[-1] == '/' else url 1675 | ref = "https://profitshort.com/" 1676 | h = {"referer": ref} 1677 | resp = client.get(url, headers=h) 1678 | soup = BeautifulSoup(resp.content, "html.parser") 1679 | inputs = soup.find_all("input") 1680 | data = { input.get('name'): input.get('value') for input in inputs } 1681 | h = { "x-requested-with": "XMLHttpRequest" } 1682 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1683 | try: return r.json()['url'] 1684 | except: return "Something went wrong :(" 1685 | 1686 | 1687 | ##################################################################################################### 1688 | # krownlinks 1689 | 1690 | def krownlinks(url): 1691 | client = requests.session() 1692 | DOMAIN = "https://tech.bloggertheme.xyz" 1693 | url = url[:-1] if url[-1] == '/' else url 1694 | code = url.split("/")[-1] 1695 | final_url = f"{DOMAIN}/{code}" 1696 | resp = client.get(final_url) 1697 | soup = BeautifulSoup(resp.content, "html.parser") 1698 | try: inputs = soup.find(id="go-link").find_all(name="input") 1699 | except: return "Incorrect Link" 1700 | data = { input.get('name'): input.get('value') for input in inputs } 1701 | h = { "x-requested-with": "XMLHttpRequest" } 1702 | time.sleep(10) 1703 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1704 | try: return r.json()['url'] 1705 | except: return "Something went wrong :(" 1706 | 1707 | 1708 | #################################################################################################### 1709 | # adrinolink 1710 | 1711 | def adrinolink (url): 1712 | if "https://adrinolinks.in/" not in url: url = "https://adrinolinks.in/" + url.split("/")[-1] 1713 | client = 
cloudscraper.create_scraper(allow_brotli=False) 1714 | DOMAIN = "https://adrinolinks.in" 1715 | ref = "https://wikitraveltips.com/" 1716 | h = {"referer": ref} 1717 | resp = client.get(url,headers=h) 1718 | soup = BeautifulSoup(resp.content, "html.parser") 1719 | inputs = soup.find_all("input") 1720 | data = { input.get('name'): input.get('value') for input in inputs } 1721 | h = { "x-requested-with": "XMLHttpRequest" } 1722 | time.sleep(8) 1723 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1724 | try: return r.json()['url'] 1725 | except: return "Something went wrong :(" 1726 | 1727 | 1728 | ##################################################################################################### 1729 | # mdiskshortners 1730 | 1731 | def mdiskshortners(url): 1732 | client = cloudscraper.create_scraper(allow_brotli=False) 1733 | DOMAIN = "https://mdiskshortners.in/" 1734 | url = url[:-1] if url[-1] == '/' else url 1735 | code = url.split("/")[-1] 1736 | final_url = f"{DOMAIN}/{code}" 1737 | ref = "https://www.adzz.in/" 1738 | h = {"referer": ref} 1739 | resp = client.get(final_url,headers=h) 1740 | soup = BeautifulSoup(resp.content, "html.parser") 1741 | inputs = soup.find_all("input") 1742 | data = { input.get('name'): input.get('value') for input in inputs } 1743 | h = { "x-requested-with": "XMLHttpRequest" } 1744 | time.sleep(2) 1745 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1746 | try: return r.json()['url'] 1747 | except: return "Something went wrong :(" 1748 | 1749 | 1750 | ##################################################################################################### 1751 | # tinyfy 1752 | 1753 | def tiny(url): 1754 | client = requests.session() 1755 | DOMAIN = "https://tinyfy.in" 1756 | url = url[:-1] if url[-1] == '/' else url 1757 | code = url.split("/")[-1] 1758 | final_url = f"{DOMAIN}/{code}" 1759 | ref = "https://www.yotrickslog.tech/" 1760 | h = {"referer": ref} 1761 | resp = client.get(final_url,headers=h) 1762 | soup = BeautifulSoup(resp.content, "html.parser") 1763 | inputs = soup.find_all("input") 1764 | data = { input.get('name'): input.get('value') for input in inputs } 1765 | h = { "x-requested-with": "XMLHttpRequest" } 1766 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1767 | try: return r.json()['url'] 1768 | except: return "Something went wrong :(" 1769 | 1770 | 1771 | ##################################################################################################### 1772 | # earnl 1773 | 1774 | def earnl(url): 1775 | client = requests.session() 1776 | DOMAIN = "https://v.earnl.xyz" 1777 | url = url[:-1] if url[-1] == '/' else url 1778 | code = url.split("/")[-1] 1779 | final_url = f"{DOMAIN}/{code}" 1780 | ref = "https://link.modmakers.xyz/" 1781 | h = {"referer": ref} 1782 | resp = client.get(final_url,headers=h) 1783 | soup = BeautifulSoup(resp.content, "html.parser") 1784 | inputs = soup.find_all("input") 1785 | data = { input.get('name'): input.get('value') for input in inputs } 1786 | h = { "x-requested-with": "XMLHttpRequest" } 1787 | time.sleep(5) 1788 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1789 | try: 1790 | return r.json()['url'] 1791 | except: return "Something went wrong :(" 1792 | 1793 | 1794 | ##################################################################################################### 1795 | # moneykamalo 1796 | 1797 | def moneykamalo(url): 1798 | client = requests.session() 1799 | DOMAIN = "https://go.moneykamalo.com" 1800 | url = url[:-1] if url[-1] == '/' else 
url 1801 | code = url.split("/")[-1] 1802 | final_url = f"{DOMAIN}/{code}" 1803 | ref = "https://techkeshri.com/" 1804 | h = {"referer": ref} 1805 | resp = client.get(final_url,headers=h) 1806 | soup = BeautifulSoup(resp.content, "html.parser") 1807 | inputs = soup.find_all("input") 1808 | data = { input.get('name'): input.get('value') for input in inputs } 1809 | h = { "x-requested-with": "XMLHttpRequest" } 1810 | time.sleep(5) 1811 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1812 | try: 1813 | return r.json()['url'] 1814 | except: return "Something went wrong :(" 1815 | 1816 | 1817 | ##################################################################################################### 1818 | # easysky 1819 | 1820 | def easysky(url): 1821 | client = cloudscraper.create_scraper(allow_brotli=False) 1822 | DOMAIN = "https://techy.veganab.co/" 1823 | url = url[:-1] if url[-1] == '/' else url 1824 | code = url.split("/")[-1] 1825 | final_url = f"{DOMAIN}/{code}" 1826 | ref = "https://veganab.co/" 1827 | h = {"referer": ref} 1828 | resp = client.get(final_url,headers=h) 1829 | soup = BeautifulSoup(resp.content, "html.parser") 1830 | inputs = soup.find_all("input") 1831 | data = { input.get('name'): input.get('value') for input in inputs } 1832 | h = { "x-requested-with": "XMLHttpRequest" } 1833 | time.sleep(8) 1834 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1835 | try: return r.json()['url'] 1836 | except: return "Something went wrong :(" 1837 | 1838 | 1839 | ##################################################################################################### 1840 | # indiurl 1841 | 1842 | def indi(url): 1843 | client = requests.session() 1844 | DOMAIN = "https://file.earnash.com/" 1845 | url = url[:-1] if url[-1] == '/' else url 1846 | code = url.split("/")[-1] 1847 | final_url = f"{DOMAIN}/{code}" 1848 | ref = "https://indiurl.cordtpoint.co.in/" 1849 | h = {"referer": ref} 1850 | resp = client.get(final_url,headers=h) 1851 | soup = BeautifulSoup(resp.content, "html.parser") 1852 | inputs = soup.find_all("input") 1853 | data = { input.get('name'): input.get('value') for input in inputs } 1854 | h = { "x-requested-with": "XMLHttpRequest" } 1855 | time.sleep(10) 1856 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1857 | try: 1858 | return r.json()['url'] 1859 | except: return "Something went wrong :(" 1860 | 1861 | 1862 | ##################################################################################################### 1863 | # linkbnao 1864 | 1865 | def linkbnao(url): 1866 | client = cloudscraper.create_scraper(allow_brotli=False) 1867 | DOMAIN = "https://vip.linkbnao.com" 1868 | url = url[:-1] if url[-1] == '/' else url 1869 | code = url.split("/")[-1] 1870 | final_url = f"{DOMAIN}/{code}" 1871 | ref = "https://ffworld.xyz/" 1872 | h = {"referer": ref} 1873 | resp = client.get(final_url,headers=h) 1874 | soup = BeautifulSoup(resp.content, "html.parser") 1875 | inputs = soup.find_all("input") 1876 | data = { input.get('name'): input.get('value') for input in inputs } 1877 | h = { "x-requested-with": "XMLHttpRequest" } 1878 | time.sleep(2) 1879 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1880 | try: 1881 | return r.json()['url'] 1882 | except: return "Something went wrong :(" 1883 | 1884 | 1885 | ##################################################################################################### 1886 | # omegalinks 1887 | 1888 | def mdiskpro(url): 1889 | client = cloudscraper.create_scraper(allow_brotli=False) 1890 | DOMAIN 
= "https://mdisk.pro" 1891 | ref = "https://m.meclipstudy.in/" 1892 | h = {"referer": ref} 1893 | resp = client.get(url,headers=h) 1894 | soup = BeautifulSoup(resp.content, "html.parser") 1895 | inputs = soup.find_all("input") 1896 | data = { input.get('name'): input.get('value') for input in inputs } 1897 | h = { "x-requested-with": "XMLHttpRequest" } 1898 | time.sleep(8) 1899 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1900 | try: 1901 | return r.json()['url'] 1902 | except: return "Something went wrong :(" 1903 | 1904 | 1905 | ##################################################################################################### 1906 | # tnshort 1907 | 1908 | def tnshort(url): 1909 | client = cloudscraper.create_scraper(allow_brotli=False) 1910 | DOMAIN = "https://page.tnlink.in/" 1911 | url = url[:-1] if url[-1] == '/' else url 1912 | code = url.split("/")[-1] 1913 | final_url = f"{DOMAIN}/{code}" 1914 | ref = "https://business.usanewstoday.club/" 1915 | h = {"referer": ref} 1916 | resp = client.get(final_url,headers=h) 1917 | soup = BeautifulSoup(resp.content, "html.parser") 1918 | inputs = soup.find_all("input") 1919 | data = { input.get('name'): input.get('value') for input in inputs } 1920 | h = { "x-requested-with": "XMLHttpRequest" } 1921 | time.sleep(8) 1922 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1923 | try: return r.json()['url'] 1924 | except: return "Something went wrong :(" 1925 | 1926 | 1927 | ##################################################################################################### 1928 | # indianshortner 1929 | 1930 | def indshort(url): 1931 | client = cloudscraper.create_scraper(allow_brotli=False) 1932 | DOMAIN = "https://indianshortner.com/" 1933 | url = url[:-1] if url[-1] == '/' else url 1934 | code = url.split("/")[-1] 1935 | final_url = f"{DOMAIN}/{code}" 1936 | ref = "https://moddingzone.in/" 1937 | h = {"referer": ref} 1938 | resp = client.get(final_url,headers=h) 1939 | soup = BeautifulSoup(resp.content, "html.parser") 1940 | inputs = soup.find_all("input") 1941 | data = { input.get('name'): input.get('value') for input in inputs } 1942 | h = { "x-requested-with": "XMLHttpRequest" } 1943 | time.sleep(5) 1944 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1945 | try: return r.json()['url'] 1946 | except: return "Something went wrong :(" 1947 | 1948 | 1949 | ##################################################################################################### 1950 | # mdisklink 1951 | 1952 | def mdisklink(url): 1953 | client = cloudscraper.create_scraper(allow_brotli=False) 1954 | DOMAIN = "https://mdisklink.link/" 1955 | url = url[:-1] if url[-1] == '/' else url 1956 | code = url.split("/")[-1] 1957 | final_url = f"{DOMAIN}/{code}" 1958 | ref = "https://m.proappapk.com/" 1959 | h = {"referer": ref} 1960 | resp = client.get(final_url,headers=h) 1961 | soup = BeautifulSoup(resp.content, "html.parser") 1962 | inputs = soup.find_all("input") 1963 | data = { input.get('name'): input.get('value') for input in inputs } 1964 | h = { "x-requested-with": "XMLHttpRequest" } 1965 | time.sleep(2) 1966 | r = client.post(f"{DOMAIN}/links/go", data=data, headers=h) 1967 | try: return r.json()['url'] 1968 | except: return "Something went wrong :(" 1969 | 1970 | 1971 | ##################################################################################################### 1972 | # rslinks 1973 | 1974 | def rslinks(url): 1975 | client = requests.session() 1976 | download = requests.get(url, stream=True, 
#####################################################################################################
# rslinks

def rslinks(url):
    client = requests.session()
    # the Location header carries an 'ms9' token used to build the blogspot redirect page
    try:
        download = client.get(url, stream=True, allow_redirects=False)
        v = download.headers["location"]
        code = v.split('ms9')[-1]
        return f"http://techyproio.blogspot.com/p/short.html?{code}=="
    except: return "Something went wrong :("


#####################################################################################################
# bitly + tinyurl

def bitly_tinyurl(url: str) -> str:
    # follow the redirect chain and return the final landing URL
    try: return requests.get(url).url
    except: return "Something went wrong :("

#####################################################################################################
# thinfi

def thinfi(url: str) -> str:
    # the destination link sits inside the first <p><a> element of the page
    try:
        response = requests.get(url)
        return BeautifulSoup(response.content, "html.parser").p.a.get("href")
    except: return "Something went wrong :("

#####################################################################################################
# helpers

# check if present in list
def ispresent(inlist,url):
    for ele in inlist:
        if ele in url:
            return True
    return False


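# shortners() below is the dispatcher: it matches the incoming link against the
# supported domains and hands it to the corresponding bypass function, while
# anything unmatched falls through to "Not in Supported Sites".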
# shortners
def shortners(url):

    # igg games
    if "https://igg-games.com/" in url:
        print("entered igg: ",url)
        return igggames(url)

    # ola movies
    elif "https://olamovies." in url:
        print("entered ola movies: ",url)
        return olamovies(url)

    # katdrive
    elif "https://katdrive." in url:
        if KATCRYPT == "":
            return "🚫 __You can't use this because__ **KATDRIVE_CRYPT** __ENV is not set__"

        print("entered katdrive: ",url)
        return katdrive_dl(url, KATCRYPT)

    # kolop
    elif "https://kolop." in url:
        if KCRYPT == "":
            return "🚫 __You can't use this because__ **KOLOP_CRYPT** __ENV is not set__"

        print("entered kolop: ",url)
        return kolop_dl(url, KCRYPT)

    # hubdrive
    elif "https://hubdrive." in url:
        if HCRYPT == "":
            return "🚫 __You can't use this because__ **HUBDRIVE_CRYPT** __ENV is not set__"

        print("entered hubdrive: ",url)
        return hubdrive_dl(url, HCRYPT)

    # drivefire
    elif "https://drivefire." in url:
        if DCRYPT == "":
            return "🚫 __You can't use this because__ **DRIVEFIRE_CRYPT** __ENV is not set__"

        print("entered drivefire: ",url)
        return drivefire_dl(url, DCRYPT)

    # filecrypt
    elif (("https://filecrypt.co/") in url or ("https://filecrypt.cc/" in url)):
        print("entered filecrypt: ",url)
        return filecrypt(url)

    # shareus
    elif "shareus.io" in url or "shareus.in" in url:
        print("entered shareus: ",url)
        return shareus(url)

    # shortingly
    elif "https://shortingly.in/" in url:
        print("entered shortingly: ",url)
        return shortingly(url)

    # vnshortener
    elif "https://vnshortener.com/" in url:
        print("entered vnshortener: ",url)
        return vnshortener(url)

    # onepagelink
    elif "https://onepagelink.in/" in url:
        print("entered onepagelink: ",url)
        return onepagelink(url)

    # gyanilinks
    elif "https://gyanilinks.com/" in url or "https://gtlinks.me/" in url:
        print("entered gyanilinks: ",url)
        return gyanilinks(url)

    # flashlink
    elif "https://go.flashlink.in" in url:
        print("entered flashlink: ",url)
        return flashl(url)

    # short2url
    elif "https://short2url.in/" in url:
        print("entered short2url: ",url)
        return short2url(url)

    # shorte
    elif "https://shorte.st/" in url:
        print("entered shorte: ",url)
        return sh_st_bypass(url)

    # psa
    elif "https://psa.wf/" in url:
        print("entered psa: ",url)
        return psa_bypasser(url)

    # sharer pw
    elif "https://sharer.pw/" in url:
        if XSRF_TOKEN == "" or Laravel_Session == "":
            return "🚫 __You can't use this because__ **XSRF_TOKEN** __and__ **Laravel_Session** __ENV is not set__"

        print("entered sharer: ",url)
        return sharer_pw(url, Laravel_Session, XSRF_TOKEN)

    # gdtot url
    elif "gdtot.cfd" in url:
        print("entered gdtot: ",url)
        return gdtot(url)

    # adfly
    elif "https://adf.ly/" in url:
        print("entered adfly: ",url)
        out = adfly(url)
        return out['bypassed_url']

    # gplinks
    elif "https://gplinks.co/" in url:
        print("entered gplink: ",url)
        return gplinks(url)

    # droplink
    elif "https://droplink.co/" in url:
        print("entered droplink: ",url)
        return droplink(url)

    # linkvertise
    elif "https://linkvertise.com/" in url:
        print("entered linkvertise: ",url)
        return linkvertise(url)

    # rocklinks
    elif "https://rocklinks.net/" in url:
        print("entered rocklinks: ",url)
        return rocklinks(url)

    # ouo
    elif "https://ouo.press/" in url:
        print("entered ouo: ",url)
        return ouo(url)

    # try2link
    elif "https://try2link.com/" in url:
        print("entered try2link: ",url)
        return try2link_bypass(url)

    # urlsopen
    elif "https://urlsopen." in url:
        print("entered urlsopen: ",url)
        return urlsopen(url)

    # xpshort
    elif "https://xpshort.com/" in url or "https://push.bdnewsx.com/" in url or "https://techymozo.com/" in url:
        print("entered xpshort: ",url)
        return xpshort(url)

    # dulink
    elif "https://du-link.in/" in url:
        print("entered dulink: ",url)
        return dulink(url)

    # ez4short
    elif "https://ez4short.com/" in url:
        print("entered ez4short: ",url)
        return ez4(url)

    # krownlinks
    elif "https://krownlinks.me/" in url:
        print("entered krownlinks: ",url)
        return krownlinks(url)

    # adrinolink
    elif "https://adrinolinks." in url:
        print("entered adrinolink: ",url)
        return adrinolink(url)

    # tnlink
    elif "https://link.tnlink.in/" in url:
        print("entered tnlink: ",url)
        return tnlink(url)

    # mdiskshortners
    elif "https://mdiskshortners.in/" in url:
        print("entered mdiskshortners: ",url)
        return mdiskshortners(url)

    # tinyfy
    elif "tinyfy.in" in url:
        print("entered tinyfy: ",url)
        return tiny(url)

    # earnl
    elif "go.earnl.xyz" in url:
        print("entered earnl: ",url)
        return earnl(url)

    # moneykamalo
    elif "earn.moneykamalo.com" in url:
        print("entered moneykamalo: ",url)
        return moneykamalo(url)

    # easysky
    elif "m.easysky.in" in url:
        print("entered easysky: ",url)
        return easysky(url)

    # indiurl
    elif "go.indiurl.in.net" in url:
        print("entered indiurl: ",url)
        return indi(url)

    # linkbnao
    elif "linkbnao.com" in url:
        print("entered linkbnao: ",url)
        return linkbnao(url)

    # omegalinks
    elif "mdisk.pro" in url:
        print("entered mdiskpro: ",url)
        return mdiskpro(url)

    # tnshort
    elif "tnshort.in" in url:
        print("entered tnshort: ",url)
        return tnshort(url)

    # indianshortner
    elif "indianshortner.in" in url:
        print("entered indianshortner: ",url)
        return indshort(url)

    # mdisklink
    elif "mdisklink.link" in url:
        print("entered mdisklink: ",url)
        return mdisklink(url)

    # rslinks
    elif "rslinks.net" in url:
        print("entered rslinks: ",url)
        return rslinks(url)

    # bitly + tinyurl
    elif "bit.ly" in url or "tinyurl.com" in url:
        print("entered bitly_tinyurl: ",url)
        return bitly_tinyurl(url)

    # pdisk
    elif "pdisk.pro" in url:
        print("entered pdisk: ",url)
        return pdisk(url)

    # thinfi
    elif "thinfi.com" in url:
        print("entered thinfi: ",url)
        return thinfi(url)

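    # the remaining branches hand the link to the scraper-style helpers
    # referenced here (scrappers, unified, others) rather than to one of the
    # site-specific bypass functions above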
    # htpmovies sharespark cinevood
    elif "https://htpmovies." in url or 'https://sharespark.me/' in url or "https://cinevood." in url or "https://atishmkv." in url \
        or "https://teluguflix" in url or 'https://taemovies' in url or "https://toonworld4all" in url or "https://animeremux" in url:
        print("entered htpmovies sharespark cinevood atishmkv: ",url)
        return scrappers(url)

    # gdrive look alike
    elif ispresent(gdlist,url):
        print("entered gdrive look alike: ",url)
        return unified(url)

    # others
    elif ispresent(otherslist,url):
        print("entered others: ",url)
        return others(url)

    # else
    else: return "Not in Supported Sites"


################################################################################################################################

--------------------------------------------------------------------------------