├── start.sh
├── FZBypass
│   ├── core
│   │   ├── exceptions.py
│   │   ├── recaptcha.py
│   │   ├── bot_utils.py
│   │   ├── bypass_scrape.py
│   │   ├── bypass_ddl.py
│   │   ├── bypass_dlinks.py
│   │   └── bypass_checker.py
│   ├── __main__.py
│   ├── __init__.py
│   └── plugins
│       ├── executor.py
│       └── bypass.py
├── requirements.txt
├── Dockerfile
├── sample_config.env
├── .github
│   └── workflows
│       ├── ruff-format.yml
│       └── ruff-check.yml
├── LICENSE
├── update.py
├── .gitignore
├── FZNotebook
│   └── fzbypasser.ipynb
└── README.md
/start.sh:
--------------------------------------------------------------------------------
1 | if [ -d "/app/.heroku/" ] || [ -d "/app/.scalingo/" ]; then
2 | python3 -m FZBypass
3 | else
4 | python3 update.py && python3 -m FZBypass
5 | fi
6 |
--------------------------------------------------------------------------------
/FZBypass/core/exceptions.py:
--------------------------------------------------------------------------------
1 | class DDLException(Exception):
2 |     """No method found for extracting a direct download link from the HTTP link."""
3 |
4 | pass
5 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | aiohttp
2 | bs4
3 | cfscrape
4 | cloudscraper
5 | curl_cffi
6 | lxml
7 | httpx
8 | python-dotenv
9 | pyrofork
10 | requests
11 | tgcrypto
12 | urllib3==1.26
13 | uvloop
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.10-slim-buster
2 |
3 | WORKDIR /app
4 |
5 | RUN apt-get -qq update --fix-missing && apt-get -qq upgrade -y && apt-get install git -y
6 |
7 | COPY requirements.txt .
8 | RUN pip3 install --no-cache-dir -r requirements.txt
9 |
10 | COPY . .
11 |
12 | CMD ["bash","start.sh"]
13 |
--------------------------------------------------------------------------------
/sample_config.env:
--------------------------------------------------------------------------------
1 | # Required
2 | BOT_TOKEN = ""
3 | API_HASH = ""
4 | API_ID = ""
5 | AUTH_CHATS = "" # Separate multiple ids by space and : for topic_ids
6 |
7 | # Optional
8 | AUTO_BYPASS = ""
9 | OWNER_ID = "" # For personal use & Logs
10 | LARAVEL_SESSION = ""
11 | XSRF_TOKEN = ""
12 | GDTOT_CRYPT = ""
13 | HUBDRIVE_CRYPT = ""
14 | DRIVEFIRE_CRYPT = ""
15 | KATDRIVE_CRYPT = ""
16 | DIRECT_INDEX = ""
17 | TERA_COOKIE = ""
18 |
19 | # Update
20 | UPSTREAM_REPO = "https://github.com/SilentDemonSD/FZBypassBot"
21 | UPSTREAM_BRANCH = "main"
22 |
--------------------------------------------------------------------------------
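
The `AUTH_CHATS` entries above take the form `chat_id` or `chat_id:topic_id`, separated by spaces; `auth_topic` in `FZBypass/core/bot_utils.py` checks every incoming message against them. A minimal, standalone sketch of that parsing (the IDs below are hypothetical placeholders, not real chats):

```python
# Sketch of how AUTH_CHATS splits into chat/topic pairs; IDs are made up.
AUTH_CHATS = "-1001111111111:42 -1002222222222"

for chat in AUTH_CHATS.split():
    if ":" in chat:
        chat_id, topic_id = chat.split(":")
        print(f"chat {chat_id}: only topic {topic_id} is authorized")
    else:
        print(f"chat {chat}: every topic is authorized")
```
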
/FZBypass/core/recaptcha.py:
--------------------------------------------------------------------------------
1 | from re import findall
2 | from requests import Session
3 |
4 |
5 | async def recaptchaV3(
6 | ANCHOR_URL="https://www.google.com/recaptcha/api2/anchor?ar=1&k=6Lcr1ncUAAAAAH3cghg6cOTPGARa8adOf-y9zv2x&co=aHR0cHM6Ly9vdW8ucHJlc3M6NDQz&hl=en&v=pCoGBhjs9s8EhFOHJFe8cqis&size=invisible&cb=ahgyd1gkfkhe",
7 | ):
8 | rs = Session()
9 | rs.headers.update({"content-type": "application/x-www-form-urlencoded"})
10 |     matches = findall(r"(api2|enterprise)/anchor\?(.*)", ANCHOR_URL)[0]
11 | url_base = "https://www.google.com/recaptcha/" + matches[0] + "/"
12 | params = matches[1]
13 | res = rs.get(url_base + "anchor", params=params)
14 | token = findall(r'"recaptcha-token" value="(.*?)"', res.text)[0]
15 | params = dict(pair.split("=") for pair in params.split("&"))
16 | res = rs.post(
17 | url_base + "reload",
18 | params=f'k={params["k"]}',
19 | data=f"v={params['v']}&reason=q&c={token}&k={params['k']}&co={params['co']}",
20 | )
21 | return findall(r'"rresp","(.*?)"', res.text)[0]
22 |
--------------------------------------------------------------------------------
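
`recaptchaV3` above replays Google's anchor/reload handshake to mint a reCAPTCHA token; `ouo()` in `FZBypass/core/bypass_ddl.py` posts it as the `x-token` form field. A hedged usage sketch (the function is declared async but performs blocking `requests` calls internally, so it resolves in a single await):

```python
# Usage sketch only; prints an opaque token from the default ouo.press anchor URL.
from asyncio import run

from FZBypass.core.recaptcha import recaptchaV3  # importing FZBypass assumes a filled config.env

async def main():
    token = await recaptchaV3()
    print(token[:40], "...")

run(main())
```
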
/.github/workflows/ruff-format.yml:
--------------------------------------------------------------------------------
1 | name: Py Formatter
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 | workflow_dispatch:
8 |
9 | jobs:
10 | check-and-format:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Checkout Repository
14 | uses: actions/checkout@v4
15 |
16 | - name: Set up Python
17 | uses: actions/setup-python@v5
18 | with:
19 | python-version: '3.10'
20 |
21 | - name: Install Ruff Dependencies
22 | run: pip install ruff
23 |
24 | - name: Check and Format Python Code
25 | run: ruff format
26 |
27 | - name: Commit Changes
28 | run: |
29 | git config --global user.name "github-actions[bot]"
30 | git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
31 | git add .
32 | git commit -m "Automated Code by GitHub Actions"
33 |
34 | - name: Push Changes
35 | uses: ad-m/github-push-action@master
36 | with:
37 | github_token: ${{ secrets.GITHUB_TOKEN }}
38 |
--------------------------------------------------------------------------------
/.github/workflows/ruff-check.yml:
--------------------------------------------------------------------------------
1 | name: Py Checker
2 |
3 | on:
4 | pull_request:
5 | branches:
6 | - main
7 | workflow_dispatch:
8 |
9 | jobs:
10 | check-and-format:
11 | runs-on: ubuntu-latest
12 | steps:
13 | - name: Checkout Repository
14 | uses: actions/checkout@v4
15 |
16 | - name: Set up Python
17 | uses: actions/setup-python@v5
18 | with:
19 | python-version: '3.10'
20 |
21 | - name: Install Ruff Dependencies
22 | run: pip install ruff
23 |
24 | - name: Check and Format Python Code
25 | run: ruff check --fix --exit-zero
26 |
27 | - name: Commit Changes
28 | run: |
29 | git config --global user.name "github-actions[bot]"
30 | git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com"
31 | git add .
32 | git commit -m "Automated Code by GitHub Actions"
33 |
34 | - name: Push Changes
35 | uses: ad-m/github-push-action@master
36 | with:
37 | github_token: ${{ secrets.GITHUB_TOKEN }}
38 |
39 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2023 Silent Demon SD ( MysterySD )
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/FZBypass/__main__.py:
--------------------------------------------------------------------------------
1 | from FZBypass import Bypass, LOGGER, Config
2 | from pyrogram import idle
3 | from pyrogram.filters import command, user
4 | from os import path as ospath, execl
5 | from asyncio import create_subprocess_exec
6 | from sys import executable
7 |
8 |
9 | @Bypass.on_message(command("restart") & user(Config.OWNER_ID))
10 | async def restart(client, message):
11 | restart_message = await message.reply("Restarting...")
12 | await (await create_subprocess_exec("python3", "update.py")).wait()
13 | with open(".restartmsg", "w") as f:
14 | f.write(f"{restart_message.chat.id}\n{restart_message.id}\n")
15 | try:
16 | execl(executable, executable, "-m", "FZBypass")
17 | except Exception:
18 | execl(executable, executable, "-m", "FZBypassBot/FZBypass")
19 |
20 |
21 | async def notify_restart():
22 | if ospath.isfile(".restartmsg"):
23 | with open(".restartmsg") as f:
24 | chat_id, msg_id = map(int, f)
25 | try:
26 | await Bypass.edit_message_text(
27 | chat_id=chat_id, message_id=msg_id, text="Restarted !"
28 | )
29 | except Exception as e:
30 | LOGGER.error(e)
31 |
32 |
33 | Bypass.start()
34 | LOGGER.info("FZ Bot Started!")
35 | Bypass.loop.run_until_complete(notify_restart())
36 | idle()
37 | Bypass.stop()
38 |
--------------------------------------------------------------------------------
/update.py:
--------------------------------------------------------------------------------
1 | from os import path as opath, getenv
2 | from logging import (
3 | StreamHandler,
4 | INFO,
5 | basicConfig,
6 | error as log_error,
7 | info as log_info,
8 | )
9 | from logging.handlers import RotatingFileHandler
10 | from subprocess import run as srun
11 | from dotenv import load_dotenv
12 |
13 | basicConfig(
14 | level=INFO,
15 | format="%(asctime)s - %(name)s - %(levelname)s - %(message)s [%(filename)s:%(lineno)d]",
16 | datefmt="%d-%b-%y %I:%M:%S %p",
17 | handlers=[
18 | RotatingFileHandler("log.txt", maxBytes=50000000, backupCount=10),
19 | StreamHandler(),
20 | ],
21 | )
22 | load_dotenv("config.env", override=True)
23 |
24 | UPSTREAM_REPO = getenv("UPSTREAM_REPO", "https://github.com/SilentDemonSD/FZBypassBot")
25 | UPSTREAM_BRANCH = getenv("UPSTREAM_BRANCH", "main")
26 |
27 | if UPSTREAM_REPO is not None:
28 | if opath.exists(".git"):
29 | srun(["rm", "-rf", ".git"])
30 |
31 | update = srun(
32 | [
33 | f"git init -q \
34 | && git config --global user.email drxxstrange@gmail.com \
35 | && git config --global user.name SilentDemonSD \
36 | && git add . \
37 | && git commit -sm update -q \
38 | && git remote add origin {UPSTREAM_REPO} \
39 | && git fetch origin -q \
40 | && git reset --hard origin/{UPSTREAM_BRANCH} -q"
41 | ],
42 | shell=True,
43 | )
44 |
45 | if update.returncode == 0:
46 | log_info("Successfully updated with latest commit from UPSTREAM_REPO")
47 | else:
48 | log_error(
49 | "Something went wrong while updating, check UPSTREAM_REPO if valid or not!"
50 | )
51 |
--------------------------------------------------------------------------------
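
`update.py` above wipes `.git`, re-commits the working tree, and hard-resets onto `origin/UPSTREAM_BRANCH`, so any local edits are discarded on every restart. The same git flow can be expressed with one argument list per command instead of a single `shell=True` string; a minimal sketch of that alternative form (not what the file actually does):

```python
# Hedged sketch: update.py's git chain, one argv list per command.
from subprocess import run

UPSTREAM_REPO = "https://github.com/SilentDemonSD/FZBypassBot"
UPSTREAM_BRANCH = "main"

for cmd in (
    ["git", "init", "-q"],
    ["git", "config", "--global", "user.email", "drxxstrange@gmail.com"],
    ["git", "config", "--global", "user.name", "SilentDemonSD"],
    ["git", "add", "."],
    ["git", "commit", "-sm", "update", "-q"],
    ["git", "remote", "add", "origin", UPSTREAM_REPO],
    ["git", "fetch", "origin", "-q"],
    ["git", "reset", "--hard", f"origin/{UPSTREAM_BRANCH}", "-q"],
):
    run(cmd, check=True)  # stop on the first failure, like the original && chain
```
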
/FZBypass/__init__.py:
--------------------------------------------------------------------------------
1 | from os import getenv
2 | from time import time
3 | from dotenv import load_dotenv
4 | from pyrogram import Client
5 | from pyrogram.enums import ParseMode
6 | from logging import getLogger, FileHandler, StreamHandler, INFO, ERROR, basicConfig
7 | from uvloop import install
8 |
9 | install()
10 | basicConfig(
11 | format="[%(asctime)s] [%(levelname)s] - %(message)s", # [%(filename)s:%(lineno)d]
12 | datefmt="%d-%b-%y %I:%M:%S %p",
13 | handlers=[FileHandler("log.txt"), StreamHandler()],
14 | level=INFO,
15 | )
16 |
17 | getLogger("pyrogram").setLevel(ERROR)
18 | LOGGER = getLogger(__name__)
19 |
20 | load_dotenv("config.env", override=True)
21 | BOT_START = time()
22 |
23 |
24 | class Config:
25 | BOT_TOKEN = getenv("BOT_TOKEN", "")
26 | API_HASH = getenv("API_HASH", "")
27 | API_ID = getenv("API_ID", "")
28 | if BOT_TOKEN == "" or API_HASH == "" or API_ID == "":
29 | LOGGER.critical("Variables Missing. Exiting Now...")
30 | exit(1)
31 | AUTO_BYPASS = getenv("AUTO_BYPASS", "False").lower() == "true"
32 | AUTH_CHATS = getenv("AUTH_CHATS", "").split()
33 | OWNER_ID = int(getenv("OWNER_ID", 0))
34 | DIRECT_INDEX = getenv("DIRECT_INDEX", "").rstrip("/")
35 | LARAVEL_SESSION = getenv("LARAVEL_SESSION", "")
36 | XSRF_TOKEN = getenv("XSRF_TOKEN", "")
37 | GDTOT_CRYPT = getenv("GDTOT_CRYPT", "")
38 | DRIVEFIRE_CRYPT = getenv("DRIVEFIRE_CRYPT", "")
39 | HUBDRIVE_CRYPT = getenv("HUBDRIVE_CRYPT", "")
40 | KATDRIVE_CRYPT = getenv("KATDRIVE_CRYPT", "")
41 | TERA_COOKIE = getenv("TERA_COOKIE", "")
42 |
43 |
44 | Bypass = Client(
45 | "FZ",
46 | api_id=Config.API_ID,
47 | api_hash=Config.API_HASH,
48 | bot_token=Config.BOT_TOKEN,
49 | plugins=dict(root="FZBypass/plugins"),
50 | parse_mode=ParseMode.HTML,
51 | )
52 |
--------------------------------------------------------------------------------
/FZBypass/core/bot_utils.py:
--------------------------------------------------------------------------------
1 | from pyrogram.filters import create
2 | from pyrogram.enums import MessageEntityType
3 | from re import search, match
4 | from requests import get as rget
5 | from urllib.parse import urlparse, parse_qs
6 | from FZBypass import Config
7 |
8 |
9 | async def auth_topic(_, __, message):
10 | for chat in Config.AUTH_CHATS:
11 | if ":" in chat:
12 | chat_id, topic_id = chat.split(":")
13 | if (
14 | int(chat_id) == message.chat.id
15 | and message.is_topic_message
16 | and message.topics
17 | and message.topics.id == int(topic_id)
18 | ):
19 | return True
20 | elif int(chat) == message.chat.id:
21 | return True
22 | return False
23 |
24 |
25 | AuthChatsTopics = create(auth_topic)
26 |
27 |
28 | async def auto_bypass(_, c, message):
29 | if (
30 | Config.AUTO_BYPASS
31 | and message.entities
32 | and not match(r"^\/(bash|shell)($| )", message.text)
33 | and any(
34 | enty.type in [MessageEntityType.TEXT_LINK, MessageEntityType.URL]
35 | for enty in message.entities
36 | )
37 | ):
38 | return True
39 | elif (
40 | not Config.AUTO_BYPASS
41 | and (txt := message.text)
42 | and match(rf"^\/(bypass|bp)(@{c.me.username})?($| )", txt)
43 | and not match(r"^\/(bash|shell)($| )", txt)
44 | ):
45 | return True
46 | return False
47 |
48 |
49 | BypassFilter = create(auto_bypass)
50 |
51 |
52 | def get_gdriveid(link):
53 | if "folders" in link or "file" in link:
54 | res = search(
55 | r"https:\/\/drive\.google\.com\/(?:drive(.*?)\/folders\/|file(.*?)?\/d\/)([-\w]+)",
56 | link,
57 | )
58 | return res.group(3)
59 | parsed = urlparse(link)
60 | return parse_qs(parsed.query)["id"][0]
61 |
62 |
63 | def get_dl(link, direct_mode=False):
64 | if direct_mode and not Config.DIRECT_INDEX:
65 | return "No Direct Index Added !"
66 | try:
67 | return rget(
68 | f"{Config.DIRECT_INDEX}/generate.aspx?id={get_gdriveid(link)}"
69 | ).json()["link"]
70 | except:
71 | return f"{Config.DIRECT_INDEX}/direct.aspx?id={get_gdriveid(link)}"
72 |
73 |
74 | def convert_time(seconds):
75 | mseconds = seconds * 1000
76 | periods = [("d", 86400000), ("h", 3600000), ("m", 60000), ("s", 1000), ("ms", 1)]
77 | result = ""
78 | for period_name, period_seconds in periods:
79 | if mseconds >= period_seconds:
80 | period_value, mseconds = divmod(mseconds, period_seconds)
81 | result += f"{int(period_value)}{period_name}"
82 | if result == "":
83 | return "0ms"
84 | return result
85 |
--------------------------------------------------------------------------------
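
`convert_time` above walks fixed millisecond periods (days down to milliseconds) and concatenates only the units that fit, so durations render compactly. A few worked examples (expected output in comments; importing `FZBypass` assumes a filled `config.env`):

```python
from FZBypass.core.bot_utils import convert_time

print(convert_time(0.004))    # 4ms
print(convert_time(75))       # 1m15s
print(convert_time(90061.5))  # 1d1h1m1s500ms
```
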
/FZBypass/plugins/executor.py:
--------------------------------------------------------------------------------
1 | from os import path as ospath, getcwd, chdir
2 | from traceback import format_exc
3 | from textwrap import indent
4 | from io import StringIO, BytesIO
5 | from re import match
6 | from contextlib import redirect_stdout, suppress
7 | from asyncio.subprocess import PIPE
8 | from asyncio import create_subprocess_shell
9 | from pyrogram.filters import command, user
10 | from FZBypass import Config, Bypass, LOGGER
11 |
12 |
13 | @Bypass.on_message(command("bash") & user(Config.OWNER_ID))
14 | async def bash(_, message):
15 | msg = await get_result(eval, message)
16 | if len(str(msg)) > 2000:
17 | with BytesIO(str.encode(msg)) as out_file:
18 | out_file.name = "output.txt"
19 | await message.reply_document(out_file)
20 | else:
21 | LOGGER.info(f"OUTPUT: '{msg}'")
22 | if not msg or msg == "\n":
23 | msg = "MessageEmpty"
24 | elif not bool(match(r"<(blockquote|spoiler|b|i|code|s|u|/a)>", msg)):
25 |             msg = f"<blockquote>{msg}</blockquote>"
26 | await message.reply(msg)
27 |
28 |
29 | async def get_result(func, message):
30 | content = message.text.split(maxsplit=1)[-1]
31 | if not content:
32 | return ""
33 | body = (
34 | "\n".join(content.split("\n")[1:-1])
35 | if content.startswith("```") and content.endswith("```")
36 | else content.strip("` \n")
37 | )
38 | env = {"__builtins__": globals()["__builtins__"], "bot": Bypass, "message": message}
39 |
40 | chdir(getcwd())
41 | with open(ospath.join(getcwd(), "FZBypass/temp.txt"), "w") as temp:
42 | temp.write(body)
43 |
44 | stdout = StringIO()
45 | to_compile = f'async def func():\n{indent(body, " ")}'
46 |
47 | try:
48 | exec(to_compile, env)
49 | except Exception as e:
50 | return f"{e.__class__.__name__}: {e}"
51 |
52 | func = env["func"]
53 | try:
54 | with redirect_stdout(stdout):
55 | func_return = await func()
56 | except Exception:
57 | value = stdout.getvalue()
58 | return f"{value}{format_exc()}"
59 | else:
60 | value = stdout.getvalue()
61 | result = None
62 | if func_return is None:
63 | if value:
64 | result = f"{value}"
65 | else:
66 | with suppress(Exception):
67 | result = f"{repr(eval(body, env))}"
68 | else:
69 | result = f"{value}{func_return}"
70 | if result:
71 | return result
72 |
73 |
74 | @Bypass.on_message(command("shell") & user(Config.OWNER_ID))
75 | async def shell(_, message):
76 | cmd = message.text.split(maxsplit=1)
77 | if len(cmd) == 1:
78 | await message.reply("No command to execute was given.")
79 | return
80 | cmd = cmd[1]
81 | proc = await create_subprocess_shell(cmd, stdout=PIPE, stderr=PIPE)
82 | stdout, stderr = await proc.communicate()
83 | stdout = stdout.decode().strip()
84 | stderr = stderr.decode().strip()
85 | reply = ""
86 | if len(stdout) != 0:
87 |         reply += f"<b>Stdout</b>\n<pre>{stdout}</pre>\n"
88 | LOGGER.info(f"Shell - {cmd} - {stdout}")
89 | if len(stderr) != 0:
90 |         reply += f"<b>Stderr</b>\n<pre>{stderr}</pre>"
91 | LOGGER.error(f"Shell - {cmd} - {stderr}")
92 | if len(reply) > 3000:
93 | with BytesIO(str.encode(reply)) as out_file:
94 | out_file.name = "shell_output.txt"
95 | await message.reply_document(out_file)
96 | elif len(reply) != 0:
97 | await message.reply(reply)
98 | else:
99 | await message.reply("No Reply")
100 |
--------------------------------------------------------------------------------
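
`get_result` above wraps the message body in an `async def func():` shell, compiles it with `exec`, then awaits it while capturing stdout. The core pattern, reduced to a self-contained sketch:

```python
# Standalone sketch of executor.py's exec-and-await pattern.
from asyncio import run
from contextlib import redirect_stdout
from io import StringIO
from textwrap import indent

async def run_snippet(body: str) -> str:
    env: dict = {}
    # Compile the snippet into a coroutine function named "func".
    exec(f'async def func():\n{indent(body, " ")}', env)
    out = StringIO()
    with redirect_stdout(out):  # capture print() output from the snippet
        ret = await env["func"]()
    return out.getvalue() + ("" if ret is None else str(ret))

print(run(run_snippet('print("hello")\nreturn 40 + 2')))  # hello / 42
```
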
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | share/python-wheels/
24 | *.egg-info/
25 | .installed.cfg
26 | *.egg
27 | MANIFEST
28 |
29 | # PyInstaller
30 | # Usually these files are written by a python script from a template
31 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
32 | *.manifest
33 | *.spec
34 |
35 | # Installer logs
36 | pip-log.txt
37 | pip-delete-this-directory.txt
38 |
39 | # Unit test / coverage reports
40 | htmlcov/
41 | .tox/
42 | .nox/
43 | .coverage
44 | .coverage.*
45 | .cache
46 | nosetests.xml
47 | coverage.xml
48 | *.cover
49 | *.py,cover
50 | .hypothesis/
51 | .pytest_cache/
52 | cover/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | .pybuilder/
76 | target/
77 |
78 | # Jupyter Notebook
79 | .ipynb_checkpoints
80 |
81 | # IPython
82 | profile_default/
83 | ipython_config.py
84 |
85 | # pyenv
86 | # For a library or package, you might want to ignore these files since the code is
87 | # intended to run in multiple environments; otherwise, check them in:
88 | # .python-version
89 |
90 | # pipenv
91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
94 | # install all needed dependencies.
95 | #Pipfile.lock
96 |
97 | # poetry
98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
99 | # This is especially recommended for binary packages to ensure reproducibility, and is more
100 | # commonly ignored for libraries.
101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
102 | #poetry.lock
103 |
104 | # pdm
105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
106 | #pdm.lock
107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
108 | # in version control.
109 | # https://pdm.fming.dev/#use-with-ide
110 | .pdm.toml
111 |
112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
113 | __pypackages__/
114 |
115 | # Celery stuff
116 | celerybeat-schedule
117 | celerybeat.pid
118 |
119 | # SageMath parsed files
120 | *.sage.py
121 |
122 | # Environments
123 | .env
124 | .venv
125 | env/
126 | venv/
127 | ENV/
128 | env.bak/
129 | venv.bak/
130 |
131 | # Spyder project settings
132 | .spyderproject
133 | .spyproject
134 |
135 | # Rope project settings
136 | .ropeproject
137 |
138 | # mkdocs documentation
139 | /site
140 |
141 | # mypy
142 | .mypy_cache/
143 | .dmypy.json
144 | dmypy.json
145 |
146 | # Pyre type checker
147 | .pyre/
148 |
149 | # pytype static type analyzer
150 | .pytype/
151 |
152 | # Cython debug symbols
153 | cython_debug/
154 |
155 | # PyCharm
156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
158 | # and can be added to the global gitignore or merged into this file. For a more nuclear
159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
160 | #.idea/
161 |
--------------------------------------------------------------------------------
/FZNotebook/fzbypasser.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "nbformat": 4,
3 | "nbformat_minor": 0,
4 | "metadata": {
5 | "colab": {
6 | "provenance": [],
7 | "cell_execution_strategy": "setup",
8 | "authorship_tag": "ABX9TyN5OVCQUjjXi6g6HoA/WV0C",
9 | "include_colab_link": true
10 | },
11 | "kernelspec": {
12 | "name": "python3",
13 | "display_name": "Python 3"
14 | },
15 | "language_info": {
16 | "name": "python"
17 | }
18 | },
19 | "cells": [
20 | {
21 | "cell_type": "markdown",
22 | "metadata": {
23 | "id": "view-in-github",
24 | "colab_type": "text"
25 | },
26 | "source": [
27 |         "<a href=\"https://colab.research.google.com/github/SilentDemonSD/FZBypassBot/blob/main/FZNotebook/fzbypasser.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
28 | ]
29 | },
30 | {
31 | "cell_type": "markdown",
32 | "source": [
33 | "# ***FZBypassBot***\n",
34 | "\n",
35 |     "_An Elegant, Fast, Multi-Threaded Bypass Telegram Bot for Bigger Deeds like Mass Bypass. Try Now, and Feel the Speedy Work._"
36 | ],
37 | "metadata": {
38 | "id": "VjfA_UU_0Mo4"
39 | }
40 | },
41 | {
42 | "cell_type": "code",
43 | "execution_count": null,
44 | "metadata": {
45 | "id": "YXlUcyRtgSCX",
46 | "cellView": "form"
47 | },
48 | "outputs": [],
49 | "source": [
50 |     "#@title ***Install Dependencies***\n",
51 | "\n",
52 | "!pip3 install -U pip uv\n",
53 | "!uv pip install --system aiohttp bs4 cfscrape cloudscraper curl_cffi lxml httpx python-dotenv requests urllib3==1.26 uvloop\n",
54 | "\n",
55 | "from IPython.display import clear_output, HTML, display\n",
56 | "clear_output()\n",
57 | "\n",
58 | "display(HTML(\"\"))"
59 | ]
60 | },
61 | {
62 | "cell_type": "code",
63 | "source": [
64 | "#@title ***Bypass Links***\n",
65 | "\n",
66 | "#@markdown ---\n",
67 | "\n",
68 | "URL = \"https://ronylink.com/JO3Ov8\" #@param {type:\"string\"}\n",
69 | "\n",
70 | "#@markdown ---\n",
71 | "\n",
72 | "try:\n",
73 | " from re import findall, match\n",
74 | " from time import sleep, time\n",
75 | " from asyncio import sleep as asleep\n",
76 | "\n",
77 | " from bs4 import BeautifulSoup\n",
78 | " from cloudscraper import create_scraper\n",
79 | "except ImportError:\n",
80 |     "    raise ValueError(\"ReRun the Previous Cell to Install Dependencies !!\")\n",
81 | "\n",
82 | "async def transcript(url: str, DOMAIN: str, ref: str, sltime) -> str:\n",
83 | " code = url.rstrip(\"/\").split(\"/\")[-1]\n",
84 | " cget = create_scraper(allow_brotli=False).request\n",
85 | " resp = cget(\"GET\", f\"{DOMAIN}/{code}\", headers={\"referer\": ref})\n",
86 | " soup = BeautifulSoup(resp.content, \"html.parser\")\n",
87 | " data = {inp.get(\"name\"): inp.get(\"value\") for inp in soup.find_all(\"input\")}\n",
88 | " await asleep(sltime)\n",
89 | " resp = cget(\n",
90 | " \"POST\",\n",
91 | " f\"{DOMAIN}/links/go\",\n",
92 | " data=data,\n",
93 | " headers={\"x-requested-with\": \"XMLHttpRequest\"},\n",
94 | " )\n",
95 | " try:\n",
96 | " return resp.json()[\"url\"]\n",
97 | " except:\n",
98 | " print(\"Link Extraction Failed\")\n",
99 | "\n",
100 |     "blink = None  # stays None if no known bypass matches the URL\n",
101 |     "print(\"Bypassing...\")\n",
102 | "if bool(match(r\"https?:\\/\\/ronylink\\.\\S+\", URL)):\n",
103 | " blink = await transcript(\n",
104 | " URL, \"https://go.ronylink.com/\", \"https://livejankari.com/\", 9\n",
105 | " )\n",
106 | "from IPython.display import clear_output\n",
107 | "clear_output()\n",
108 | "print(\"Bypassed URL : \\n\", blink)"
109 | ],
110 | "metadata": {
111 | "cellView": "form",
112 | "id": "apFB3Bh6snhj"
113 | },
114 | "execution_count": null,
115 | "outputs": []
116 | }
117 | ]
118 | }
--------------------------------------------------------------------------------
/FZBypass/plugins/bypass.py:
--------------------------------------------------------------------------------
1 | from time import time
2 | from asyncio import create_task, gather, sleep as asleep
3 | from pyrogram.filters import command, user
4 | from pyrogram.types import (
5 | InlineKeyboardButton,
6 | InlineKeyboardMarkup,
7 | InlineQueryResultArticle,
8 | InputTextMessageContent,
9 | )
10 | from pyrogram.enums import MessageEntityType
11 | from pyrogram.errors import QueryIdInvalid
12 |
13 | from FZBypass import Config, Bypass, BOT_START
14 | from FZBypass.core.bypass_checker import direct_link_checker, is_excep_link
15 | from FZBypass.core.bot_utils import AuthChatsTopics, convert_time, BypassFilter
16 |
17 |
18 | @Bypass.on_message(command("start"))
19 | async def start_msg(client, message):
20 | await message.reply(
21 | f"""FZ Bypass Bot!
22 |
23 | A Powerful Elegant Multi Threaded Bot written in Python... which can Bypass Various Shortener Links, Scrape links, and More ...
24 |
25 | Bot Started {convert_time(time() - BOT_START)} ago...
26 |
27 | 🛃 Use Me Here : @CyberPunkGrp (Bypass Topic)""",
28 | quote=True,
29 | reply_markup=InlineKeyboardMarkup(
30 | [
31 | [
32 | InlineKeyboardButton("🎓 Dev", url="https://t.me/SilentDemonSD"),
33 | InlineKeyboardButton(
34 | "🔍 Deploy Own",
35 | url="https://github.com/SilentDemonSD/FZBypassBot",
36 | ),
37 | ]
38 | ]
39 | ),
40 | )
41 |
42 |
43 | @Bypass.on_message(BypassFilter & (user(Config.OWNER_ID) | AuthChatsTopics))
44 | async def bypass_check(client, message):
45 | uid = message.from_user.id
46 | if (reply_to := message.reply_to_message) and (
47 | reply_to.text is not None or reply_to.caption is not None
48 | ):
49 | txt = reply_to.text or reply_to.caption
50 |         entities = reply_to.entities or reply_to.caption_entities or []
51 | elif Config.AUTO_BYPASS or len(message.text.split()) > 1:
52 | txt = message.text
53 |         entities = message.entities or []
54 | else:
55 | return await message.reply("No Link Provided!")
56 |
57 | wait_msg = await message.reply("Bypassing...")
58 | start = time()
59 |
60 | link, tlinks, no = "", [], 0
61 | atasks = []
62 | for enty in entities:
63 | if enty.type == MessageEntityType.URL:
64 | link = txt[enty.offset : (enty.offset + enty.length)]
65 | elif enty.type == MessageEntityType.TEXT_LINK:
66 | link = enty.url
67 |
68 | if link:
69 | no += 1
70 | tlinks.append(link)
71 | atasks.append(create_task(direct_link_checker(link)))
72 | link = ""
73 |
74 | completed_tasks = await gather(*atasks, return_exceptions=True)
75 |
76 | parse_data = []
77 | for result, link in zip(completed_tasks, tlinks):
78 | if isinstance(result, Exception):
79 | bp_link = f"\n┖ Bypass Error: {result}"
80 | elif is_excep_link(link):
81 | bp_link = result
82 | elif isinstance(result, list):
83 | bp_link, ui = "", "┖"
84 | for ind, lplink in reversed(list(enumerate(result, start=1))):
85 | bp_link = f"\n{ui} {ind}x Bypass Link: {lplink}" + bp_link
86 | ui = "┠"
87 | else:
88 | bp_link = f"\n┖ Bypass Link: {result}"
89 |
90 | if is_excep_link(link):
91 | parse_data.append(f"{bp_link}\n\n━━━━━━━✦✗✦━━━━━━━\n\n")
92 | else:
93 | parse_data.append(
94 | f"┎ Source Link: {link}{bp_link}\n\n━━━━━━━✦✗✦━━━━━━━\n\n"
95 | )
96 |
97 | end = time()
98 |
99 | if len(parse_data) != 0:
100 | parse_data[-1] = (
101 | parse_data[-1]
102 | + f"┎ Total Links : {no}\n┠ Results In {convert_time(end - start)} !\n┖ By {message.from_user.mention} ( #ID{message.from_user.id} )"
103 | )
104 | tg_txt = "━━━━━━━✦✗✦━━━━━━━\n\n"
105 | for tg_data in parse_data:
106 | tg_txt += tg_data
107 | if len(tg_txt) > 4000:
108 | await wait_msg.edit(tg_txt, disable_web_page_preview=True)
109 | wait_msg = await message.reply(
110 | "Fetching...", reply_to_message_id=wait_msg.id
111 | )
112 | tg_txt = ""
113 | await asleep(2.5)
114 |
115 | if tg_txt != "":
116 | await wait_msg.edit(tg_txt, disable_web_page_preview=True)
117 | else:
118 | await wait_msg.delete()
119 |
120 |
121 | @Bypass.on_message(command("log") & user(Config.OWNER_ID))
122 | async def send_logs(client, message):
123 | await message.reply_document("log.txt", quote=True)
124 |
125 |
126 | @Bypass.on_inline_query()
127 | async def inline_query(client, query):
128 | answers = []
129 | string = query.query.lower()
130 | if string.startswith("!bp "):
131 |         link = string.removeprefix("!bp ").strip()
132 | start = time()
133 | try:
134 | bp_link = await direct_link_checker(link, True)
135 | end = time()
136 |
137 | if not is_excep_link(link):
138 | bp_link = (
139 | f"┎ Source Link: {link}\n┃\n┖ Bypass Link: {bp_link}"
140 | )
141 | answers.append(
142 | InlineQueryResultArticle(
143 | title="✅️ Bypass Link Success !",
144 | input_message_content=InputTextMessageContent(
145 | f"{bp_link}\n\n✎﹏﹏﹏﹏﹏﹏﹏﹏﹏﹏﹏﹏﹏﹏﹏\n\n🧭 Took Only {convert_time(end - start)}",
146 | disable_web_page_preview=True,
147 | ),
148 | description=f"Bypass via !bp {link}",
149 | reply_markup=InlineKeyboardMarkup(
150 | [
151 | [
152 | InlineKeyboardButton(
153 | "Bypass Again",
154 | switch_inline_query_current_chat="!bp ",
155 | )
156 | ]
157 | ]
158 | ),
159 | )
160 | )
161 | except Exception as e:
162 | bp_link = f"Bypass Error: {e}"
163 | end = time()
164 |
165 | answers.append(
166 | InlineQueryResultArticle(
167 | title="❌️ Bypass Link Error !",
168 | input_message_content=InputTextMessageContent(
169 | f"┎ Source Link: {link}\n┃\n┖ {bp_link}\n\n✎﹏﹏﹏﹏﹏﹏﹏﹏﹏﹏﹏﹏﹏﹏﹏\n\n🧭 Took Only {convert_time(end - start)}",
170 | disable_web_page_preview=True,
171 | ),
172 | description=f"Bypass via !bp {link}",
173 | reply_markup=InlineKeyboardMarkup(
174 | [
175 | [
176 | InlineKeyboardButton(
177 | "Bypass Again",
178 | switch_inline_query_current_chat="!bp ",
179 | )
180 | ]
181 | ]
182 | ),
183 | )
184 | )
185 |
186 | else:
187 | answers.append(
188 | InlineQueryResultArticle(
189 | title="♻️ Bypass Usage: In Line",
190 | input_message_content=InputTextMessageContent(
191 | """FZ Bypass Bot!
192 |
193 | A Powerful Elegant Multi Threaded Bot written in Python... which can Bypass Various Shortener Links, Scrape links, and More ...
194 |
195 | 🎛 Inline Use : !bp [Single Link]""",
196 | ),
197 | description="Bypass via !bp [link]",
198 | reply_markup=InlineKeyboardMarkup(
199 | [
200 | [
201 | InlineKeyboardButton(
202 | "FZ Channel", url="https://t.me/FXTorrentz"
203 | ),
204 | InlineKeyboardButton(
205 | "Try Bypass", switch_inline_query_current_chat="!bp "
206 | ),
207 | ]
208 | ]
209 | ),
210 | )
211 | )
212 | try:
213 | await query.answer(results=answers, cache_time=0)
214 | except QueryIdInvalid:
215 | pass
216 |
--------------------------------------------------------------------------------
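
`bypass_check` above collects links two ways: plain `URL` entities are sliced out of the message text by offset and length, while `TEXT_LINK` entities carry the target in `.url`; everything is then bypassed concurrently via `gather`. The slicing step in isolation (the offsets are hypothetical values a URL entity would carry):

```python
# Minimal sketch of the entity slicing used in bypass_check.
txt = "grab https://example.com/abc now"
offset, length = 5, 23  # hypothetical entity offset/length
print(txt[offset : offset + length])  # https://example.com/abc
```
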
/FZBypass/core/bypass_scrape.py:
--------------------------------------------------------------------------------
1 | from asyncio import gather, create_task
2 | from re import search, match, sub
3 | from requests import get as rget
4 | from cloudscraper import create_scraper
5 | from urllib.parse import urlparse
6 | from bs4 import BeautifulSoup, NavigableString, Tag
7 |
8 | from FZBypass.core.bypass_ddl import transcript
9 |
10 |
11 | async def sharespark(url: str) -> str:
12 | gd_txt = ""
13 | cget = create_scraper().request
14 | res = cget("GET", "?action=printpage;".join(url.split("?")))
15 | soup = BeautifulSoup(res.text, "html.parser")
16 | for br in soup.findAll("br"):
17 | next_s = br.nextSibling
18 | if not (next_s and isinstance(next_s, NavigableString)):
19 | continue
20 | if (
21 | (next2_s := next_s.nextSibling)
22 | and isinstance(next2_s, Tag)
23 | and next2_s.name == "br"
24 | and str(next_s).strip()
25 | ):
26 | if match(r"^(480p|720p|1080p)(.+)? Links:\Z", next_s):
27 | gd_txt += f'{next_s.replace("Links:", "GDToT Links :")}\n\n'
28 | for s in next_s.split():
29 | ns = sub(r"\(|\)", "", s)
30 | if match(r"https?://.+\.gdtot\.\S+", ns):
31 | soup = BeautifulSoup(cget("GET", ns).text, "html.parser")
32 | parse_data = (
33 | (soup.select('meta[property^="og:description"]')[0]["content"])
34 | .replace("Download ", "")
35 | .rsplit("-", maxsplit=1)
36 | )
37 | gd_txt += f"┎ Name : {parse_data[0]}\n┠ Size : {parse_data[-1]}\n┃\n┖ GDTot : {ns}\n\n"
38 | elif match(r"https?://pastetot\.\S+", ns):
39 | nxt = sub(r"\(|\)|(https?://pastetot\.\S+)", "", next_s)
40 | gd_txt += f"\n{nxt}\n┖ {ns}\n"
41 | if len(gd_txt) > 4000:
42 | return gd_txt # Broken Function
43 | if gd_txt != "":
44 | return gd_txt
45 |
46 |
47 | async def skymovieshd(url: str) -> str:
48 | soup = BeautifulSoup(rget(url, allow_redirects=False).text, "html.parser")
49 | t = soup.select('div[class^="Robiul"]')
50 | gd_txt = f"{t[-1].text.replace('Download ', '')}"
51 | _cache = []
52 | for link in soup.select('a[href*="howblogs.xyz"]'):
53 | if link["href"] in _cache:
54 | continue
55 | _cache.append(link["href"])
56 | gd_txt += f"\n\n{link.text} : \n"
57 | nsoup = BeautifulSoup(
58 | rget(link["href"], allow_redirects=False).text, "html.parser"
59 | )
60 | atag = nsoup.select('div[class="cotent-box"] > a[href]')
61 | for no, link in enumerate(atag, start=1):
62 | gd_txt += f"{no}. {link['href']}\n"
63 | return gd_txt
64 |
65 |
66 | async def cinevood(url: str) -> str:
67 | soup = BeautifulSoup(rget(url).text, "html.parser")
68 | titles = soup.select("h6")
69 | links_by_title = {}
70 |
71 | # Extract the post title from the webpage's title
72 | post_title = soup.title.string.strip()
73 |
74 | for title in titles:
75 | title_text = title.text.strip()
76 | gdtot_links = title.find_next("a", href=lambda href: "gdtot" in href.lower())
77 | multiup_links = title.find_next(
78 | "a", href=lambda href: "multiup" in href.lower()
79 | )
80 | filepress_links = title.find_next(
81 | "a", href=lambda href: "filepress" in href.lower()
82 | )
83 | gdflix_links = title.find_next("a", href=lambda href: "gdflix" in href.lower())
84 | kolop_links = title.find_next("a", href=lambda href: "kolop" in href.lower())
85 | zipylink_links = title.find_next(
86 | "a", href=lambda href: "zipylink" in href.lower()
87 | )
88 |
89 |         links = []
90 |         if gdtot_links:
91 |             links.append(
92 |                 f'<a href="{gdtot_links["href"]}">GDToT</a>'
93 |             )
94 |         if multiup_links:
95 |             links.append(
96 |                 f'<a href="{multiup_links["href"]}">MultiUp</a>'
97 |             )
98 |         if filepress_links:
99 |             links.append(
100 |                 f'<a href="{filepress_links["href"]}">FilePress</a>'
101 |             )
102 |         if gdflix_links:
103 |             links.append(
104 |                 f'<a href="{gdflix_links["href"]}">GDFlix</a>'
105 |             )
106 |         if kolop_links:
107 |             links.append(
108 |                 f'<a href="{kolop_links["href"]}">Kolop</a>'
109 |             )
110 |         if zipylink_links:
111 |             links.append(
112 |                 f'<a href="{zipylink_links["href"]}">ZipyLink</a>'
113 |             )
114 |
115 | if links:
116 | links_by_title[title_text] = links
117 |
118 | prsd = f"🔖 Title: {post_title}\n"
119 | for title, links in links_by_title.items():
120 | prsd += f"\n┏🏷️ Name: {title}\n"
121 | prsd += "┗🔗 Links: " + " | ".join(links) + "\n"
122 |
123 | return prsd
124 |
125 |
126 | async def kayoanime(url: str) -> str:
127 | soup = BeautifulSoup(rget(url).text, "html.parser")
128 | titles = soup.select("h6")
129 | gdlinks = soup.select('a[href*="drive.google.com"], a[href*="tinyurl"]')
130 | prsd = f"{soup.title.string}"
131 | gd_txt, link = "GDrive", ""
132 | for n, gd in enumerate(gdlinks, start=1):
133 | if (link := gd["href"]) and "tinyurl" in link:
134 | link = rget(link).url
135 | domain = urlparse(link).hostname
136 | gd_txt = (
137 | "Mega"
138 | if "mega" in domain
139 | else "G Group"
140 | if "groups" in domain
141 | else "Direct Link"
142 | )
143 | prsd += f"""
144 |
145 | {n}. {gd.string}
146 | ┗ Links : <a href="{link}">{gd_txt}</a>"""
147 | return prsd
148 |
149 |
150 | async def toonworld4all(url: str):
151 | if "/redirect/main.php?url=" in url:
152 | return f"┎ Source Link: {url}\n┃\n┖ Bypass Link: {rget(url).url}"
153 | xml = rget(url).text
154 | soup = BeautifulSoup(xml, "html.parser")
155 | if "/episode/" not in url:
156 | epl = soup.select('a[href*="/episode/"]')
157 | tls = soup.select('div[class*="mks_accordion_heading"]')
158 | stitle = search(r"\"name\":\"(.+)\"", xml).group(1).split('"')[0]
159 | prsd = f"{stitle}"
160 | for n, (t, l) in enumerate(zip(tls, epl), start=1):
161 | prsd += f"""
162 |
163 | {n}. {t.strong.string}
164 | ┖ Link : {l["href"]}"""
165 | return prsd
166 | links = soup.select('a[href*="/redirect/main.php?url="]')
167 | titles = soup.select("h5")
168 | prsd = f"{titles[0].string}"
169 | titles.pop(0)
170 | slicer, _ = divmod(len(links), len(titles))
171 | atasks = []
172 | for sl in links:
173 | nsl = ""
174 | while all(x not in nsl for x in ["rocklinks", "link1s"]):
175 | nsl = rget(sl["href"], allow_redirects=False).headers["location"]
176 | if "rocklinks" in nsl:
177 | atasks.append(
178 | create_task(
179 | transcript(
180 | nsl,
181 | "https://insurance.techymedies.com/",
182 | "https://highkeyfinance.com/",
183 | 5,
184 | )
185 | )
186 | )
187 | elif "link1s" in nsl:
188 | atasks.append(
189 | create_task(
190 | transcript(nsl, "https://link1s.com", "https://anhdep24.com/", 9)
191 | )
192 | )
193 |
194 | com_tasks = await gather(*atasks, return_exceptions=True)
195 | lstd = [com_tasks[i : i + slicer] for i in range(0, len(com_tasks), slicer)]
196 |
197 | for no, tl in enumerate(titles):
198 | prsd += f"\n\n{tl.string}\n┃\n┖ Links : "
199 | for tl, sl in zip(links, lstd[no]):
200 | if isinstance(sl, Exception):
201 | prsd += str(sl)
202 | else:
203 |                 prsd += f'<a href="{sl}">{tl.string}</a>, '
204 | prsd = prsd[:-2]
205 | return prsd
206 |
207 |
208 | async def tamilmv(url):
209 | cget = create_scraper().request
210 | resp = cget("GET", url)
211 | soup = BeautifulSoup(resp.text, "html.parser")
212 | mag = soup.select('a[href^="magnet:?xt=urn:btih:"]')
213 | tor = soup.select('a[data-fileext="torrent"]')
214 | parse_data = f"{soup.title.string}"
215 | for no, (t, m) in enumerate(zip(tor, mag), start=1):
216 | filename = sub(r"www\S+|\- |\.torrent", "", t.string)
217 | parse_data += f"""
218 |
219 | {no}. {filename}
220 | ┖ Links : <a href="{m['href']}">Magnet 🧲</a> | <a href="{t['href']}">Torrent 🌐</a>"""
221 | return parse_data
222 |
--------------------------------------------------------------------------------
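
The scrapers above share one shape: fetch the page with `requests` or `cloudscraper`, `select()` the anchors of interest, and format them into an HTML reply for Telegram. A minimal sketch of that shape with a placeholder URL and selector (not one of the supported sites):

```python
# Hedged sketch of the common scrape shape; the URL and selector are placeholders.
from bs4 import BeautifulSoup
from requests import get as rget

def scrape_links(url: str, selector: str = 'a[href*="example"]') -> str:
    soup = BeautifulSoup(rget(url).text, "html.parser")
    out = f"{soup.title.string}\n"
    for n, a in enumerate(soup.select(selector), start=1):
        out += f'{n}. <a href="{a["href"]}">{a.text.strip()}</a>\n'
    return out
```
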
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 | ## ***FZBypassBot***
9 |
10 | An **Elegant, Fast, Multi-Threaded Bypass Telegram Bot** for Bigger Deeds like Mass Bypass. Try Now, and Feel the Speedy Work.
11 |
12 | [**Demo Bot**](https://t.me/FZBypassBot) | [**Supported Sites**](#supported-sites) | [**Support Group**](https://t.me/FXTorrentz)
13 |
14 |
15 |
16 | ---
17 |
18 | ### ***Try Now for Free !***
19 | - _Use in Google Colab for Demo_
20 | > **Downside:** Multi Thread Bypass Not Supported
21 |
22 |
23 |
24 |
25 |
26 | ---
27 |
28 | ## ***Features***
29 | - _Fastest: written in Async with Speed Enhancers_
30 | - _LoopBypass V1 (Auto Bypass Nested Shorteners)_
31 | - _Built with a Simultaneous Bypass Method_
32 | - _Support for Authorized Chats & Topics_
33 | - _Added Support for Inline Bypass (Use anytime, anywhere)_
34 | > **Enable:** BotFather -> Bot Settings -> Inline Mode (Turn On)
35 |
36 | ---
37 |
38 | ## ***Supported Sites***
39 | - All `Credits` to Respective Script Owner & Contributors
40 | - All these are Collected from the Internet / Web
41 |
42 | ### ***Shorten Sites***
43 |
44 | - Last Updated : Unknown
45 |
46 |
47 | Shortening Sites: Click Here to Expand
48 |
49 | | __Shortener Sites__ | __Status__ |__Last Updated__ |
50 | |:------------------:|:----------:|:----------------:|
51 | |`adrinolinks.com`|✅️| **01-05-2024**|
52 | |`adsfly.in`|✅️| **01-05-2024**|
53 | |`anlinks.in`|️✅️| **22-04-2024**|
54 | |`appurl.io`|✅️| **01-05-2024**|
55 | |`bindaaslinks.com`|✅️| **29-04-2024**|
56 | |`bit.ly` + `tinyurl.com` + `*.short.gy` + `shorturl.ac` + `t.ly`|✅️| **01-05-2024**|
57 | |`bringlifes.com`|️️⚠️| **01-05-2024**|
58 | |`dalink.in`|️⚠️| **01-05-2024**|
59 | |`disk.yandex.ru` + `yandex.com`|✅️| **01-05-2024**|
60 | |`download.mdiskshortner.link`|✅️| **Unknown**|
61 | |`droplink.co`|✅️| **Unknown**|
62 | |`dtglinks.in`|✅️| **Unknown**|
63 | |`du-link.in` + `dulink.in`|✅️| **Unknown**|
64 | |`earn.moneykamalo.com`|✅️| **Unknown**|
65 | |`earn2me.com`|✅️| **Unknown**|
66 | |`earn2short.in`|✅️| **Unknown**|
67 | |`earn4link.in`|✅️|**Unknown**|
68 | |`evolinks.in`|✅| **22-04-2024**|
69 | |`ez4short.com`|✅️| **Unknown**|
70 | |`go.lolshort.tech`|❌️| **Unknown**|
71 | |`gtlinks.me` + `gyanilinks.com`|✅| **03-05-2024**|
72 | |`indianshortner.in`|✅️| **Unknown**|
73 | |`indyshare.net`|✅️| **Unknown**|
74 | |`instantearn.in`|✅️| **Unknown**|
75 | |`justpaste.it`|✅️| **24-06-2024**|
76 | |`kpslink.in`|✅️| **30-04-2024**|
77 | |`krownlinks.me`|✅️| **Unknown**|
78 | |`link.shorito.com`|❌️| **03-05-2024**|
79 | |`link.tnlink.in`|️✅️| **23-04-2024**|
80 | |`link.tnshort.net`|✅️| **04-05-2024**|
81 | |`link.vipurl.in` + `vipurl.in` + `count.vipurl.in`|✅️| **Unknown**|
82 | |`link1s.com`|✅️| **Unknown**|
83 | |`link4earn.com` + `link4earn.in`|✅️| **Unknown**|
84 | |`linkbanao.com`|❌️| **Unknown**|
85 | |`linkfly.me`|✅️| **Unknown**|
86 | |`linkjust.com`|✅️| **Unknown**|
87 | |`linkpays.in`|✅️| **Unknown**|
88 | |`linkshortx.in`|✅️| **03-05-2024**|
89 | |`linksly.co`|✅️| **Unknown**|
90 | |`linkvertise.com`|️❌️| **Unknown**|
91 | |`linksxyz.in`|️️✅️| **24-06-2024**|
92 | |`linkyearn.com`|❌️| **Unknown**|
93 | |`m.easysky.in`|✅| **23-04-2024**|
94 | |`m.narzolinks.click`|✅️| **Unknown**|
95 | |`mdisk.pro`|✅️| **01-05-2024**|
96 | |`mdiskshortner`|✅| **30-04-2024**|
97 | |`mediafire.com`|✅️| **24-06-2024**|
98 | |`modijiurl.com`|️❌️| **01-05-2024**|
99 | |`moneycase.link`|✅️| **Unknown**|
100 | |`mplaylink.com`|️❌️| **Unknown**|
101 | |`omnifly.in.net`|✅️| **Unknown**|
102 | |`onepagelink.in`|✅️| **Unknown**|
103 | |`ouo.io` + `ouo.press`|✅️| **Unknown**|
104 | |`pandaznetwork.com`|✅️| **Unknown**|
105 | |`pdisk.site`|✅️| **03-05-2024**|
106 | |`pdiskshortener.com`|✅️| **Unknown**|
107 | |`pkin.me` + `go.paisakamalo.in`|✅️| **Unknown**|
108 | |`publicearn.com`|❌️| **01-05-2024**|
109 | |`rocklinks.net`|✅️| **01-05-2024**|
110 | |`ronylink.com`|✅️| **24-04-2024**|
111 | |`rslinks.net`|❌️| **Unknown**|
112 | |`sheralinks.com`|✅️| **Unknown**|
113 | |`short.tnvalue.in`|✅️| **Unknown**|
114 | |`short2url.in`|✅️| **Unknown**|
115 | |`shortingly.com`|️✅️|️ **Unknown**|
116 | |`shrdsk.me`|️️✅️| **Unknown**|
117 | |`shrinke.me`|✅️| **30-04-2024**|
118 | |`shrinkforearn.xyz`|️❌️| **Unknown**|
119 | |`sklinks.in` + `sklinks.tech`|✅️| **Unknown**|
120 | |`surl.li`|✅️| **Unknown**|
121 | |`sxslink.com`|✅️| **Unknown**|
122 | |`tamizhmasters.com`|⚠️| **Unknown**|
123 | |`terabox.*` + `nephobox.*` + `4funbox.*` + `mirrobox.*` + `momerybox.*` + `teraboxapp.*`|✅️| **Unknown**|
124 | |`tglink.in`|✅️| **Unknown**|
125 | |`tinyfy.in`|✅️| **Unknown**|
126 | |`try2link.com`|✅️| **18-04-2024**|
127 | |`tulinks.one` + `go.tulinks.online` + `tulinks.online`|✅️| **Unknown**|
128 | |`url4earn.in`|✅️| **Unknown**|
129 | |`urllinkshort.in`|✅️| **Unknown**|
130 | |`urlsopen.com`|✅️| **Unknown**|
131 | |`urlspay.in`|✅️| **Unknown**|
132 | |`v2.kpslink.in`|✅️| **29-04-2024**|
133 | |`v2links.com`|️❌️| **Unknown**|
134 | |`viplinks.io`|️⚠️| **Unknown**|
135 | |`vplinks.in`|✅️| **29-04-2024**|
136 | |`www.dropbox.com`|✅️| **Unknown**|
137 | |`xpshort.com` + `push.bdnewsx.com` + `techymozo.com`|❌| **Unknown**|
138 | |`ziplinker.net`|✅️| **29-04-2024**|
139 |
140 |
141 | ### ***Scrape Sites***
142 | | __Websites__ | __Status__ |__Last Updated__ |
143 | |:------------:|:----------:|:----------------:|
144 | |`cinevood.*` **(Page)** |✅️| **Unknown**|
145 | |`kayoanime.com` **(Page)**|✅️| **Unknown**|
146 | |`skymovieshd.*`|✅️| **Unknown**|
147 | |`toonworld4all.*` **(Page + Episode)** |️❌️| **Unknown**|
148 | |`ww1.sharespark.cfd`|✅️| **Unknown**|
149 | |`www.1tamilmv.*` **(Page)**|✅️| **Unknown**|
150 |
151 |
152 |
153 | ### ***GDrive Sites***
154 | > _Fast Index Link is Supported for all GD Sites_
155 |
156 |
157 | GDrive Sites: Click Here to Expand
158 |
159 | | __Websites__ | __Status__ | __Last Updated__ |
160 | |:------------:|:----------:|:----------------:|
161 | |`appdrive.*` **(File + Pack)**|✅️|**Unknown**|
162 | |`drivefire.co`|✅️|**Unknown**|
163 | |`*.gdflix.*`**(File + Pack)**|❌️|**Unknown**|
165 | |`hubdrive.lat` **(Instant Link)**|✅️|**Unknown**|
166 | |`katdrive.org` **(Direct Download)**|✅️|**Unknown**|
167 | |`new*.gdtot.zip`|️❌️| **Unknown**|
168 | |`new*.filepress.store` + `filebee.xyz` + `onlystream.xyz` + `pressbee.xyz`**( Only Tg Links )**|✅️|**Unknown**|
169 | |`sharer.pw`|❌|**Unknown**|
170 |
171 |
172 |
173 | ---
174 |
175 | ## ***Deploy Guide***
176 | 1. `Heroku` or `Koyeb` or `Render` or `Scalingo` or _**More**_ _(Recommended)_
177 | - Use [pyTele-Loader](https://github.com/SilentDemonSD/pyTele-Loader) and Follow further Steps.
178 | - **Variables Values:**
179 | - `REPO_URL`: https://github.com/SilentDemonSD/FZBypassBot
180 | - `REPO_BRANCH`: main
181 | - `START_CMD`: bash start.sh
182 | 2. `VPS`
183 | - **Build And Run The Docker Image Using Official Docker Commands**
184 | - _Clone the Repository_
185 | ```
186 | git clone https://github.com/SilentDemonSD/FZBypassBot && cd FZBypassBot
187 | ```
188 | - _Build Docker image:_
189 | ```
190 | docker build . -t fzbypass
191 | ```
192 | - _Run the image:_
193 | ```
194 | docker run fzbypass
195 | ```
196 | - _To stop the running image:_
197 | - Check Running Containers
198 | ```
199 | docker ps
200 | ```
201 | - Get the ID and Stop the Container
202 | ```
203 | docker stop idxxxxx
204 | ```
205 | - _Add `sudo` at the Start of Each Command if your CLI is rooted_
206 | - _Add all Config Data in `config.env`_
207 | - _Update your Repo Directly, Make sure to fill Up `UPSTREAM_REPO` & `UPSTREAM_BRANCH`_
208 | ```
209 | docker restart idxxxxx
210 | ```
211 |
212 | ---
213 |
214 | ## ***Config Setup***
215 | - `BOT_TOKEN`: Telegram Bot Token that you got from BotFather.
216 | - `OWNER_ID`: Telegram User ID (not username) of the Owner of the bot.
217 | - `API_ID`: This is to authenticate your Telegram account for downloading Telegram files. You can get this from https://my.telegram.org.
218 | - `API_HASH`: This is to authenticate your Telegram account for downloading Telegram files. You can get this from https://my.telegram.org.
219 | - `AUTH_CHATS`: Group ID (with Topic ID), Separated by space.
220 | > **Format:** chat_id:topic_id chat_id chat_id:topic_id
221 | - `AUTO_BYPASS`: Change between Command Mode or Auto Bypass Mode. Default is False.
222 | - `GDTOT_CRYPT`: GdToT Crypt (Optional). It works with & without Crypt!
223 | - `HUBDRIVE_CRYPT`: HubDrive Crypt (Optional), It works with or without Cookie, Get from Cookie Editor Extension.
224 | - `KATDRIVE_CRYPT`: KatDrive Crypt (Optional), It works with or without Cookie, Get from Cookie Editor Extension.
225 | - `DRIVEFIRE_CRYPT`: DriveFire Crypt, Get from Cookie Editor Extension.
226 | - `DIRECT_INDEX`: Direct Fast Download GDrive Links.
227 | - Generate via [Google-Drive-Index](https://gitlab.com/GoogleDriveIndex/cloudflare-gdrive-download-worker/-/blob/main/src/worker.js). Follow further from inside the script. Copy & Deploy on [CF Workers](https://cloudflare.com)
228 | - Get Raw `Refresh Token` from [laravel-google](https://github.com/ivanvermeyen/laravel-google-drive-demo/blob/master/README/2-getting-your-refresh-token.md)
229 | - `TERA_COOKIE`: Get the Terabox `ndus` Cookie from Cookie Editor Extension.
230 | - `LARAVEL_SESSION`: Get from `sharer.pw` Cookie for Login base.
231 | - `XSRF_TOKEN`: Get from `sharer.pw` Cookie for Login base.
232 | - `UPSTREAM_REPO`: Put Upstream Repo to Update. Defaults to `https://github.com/SilentDemonSD/FZBypassBot`
233 | - `UPSTREAM_BRANCH`: Put Branch Name. Defaults to `main`
234 |
235 | ---
236 |
237 | ## ***Contributions***
238 | - Open-source Projects need you to fill in the gaps for long-term Usage
239 | - Try forking, push your Code, and open a Pull Request !
240 | - If you want to help by providing Snippets or Scripts, send them to me on Telegram: [@MysterySD](https://t.me/MysterySD)
241 |
242 | **Thanks for Reading, yeh !!**
243 |
244 | ---
245 |
246 | ## ***Credits***
247 | - `SilentDemonSD` (Developer)
248 | - `Other Contributors` (Those who committed and Helped Internally)
249 | - `Link-Bypasser-Bot` (Many Scripts are Taken & Totally Modified)
250 |
--------------------------------------------------------------------------------
/FZBypass/core/bypass_ddl.py:
--------------------------------------------------------------------------------
1 | from requests import post as rpost
2 | from re import findall, compile
3 | from time import sleep, time
4 | from asyncio import sleep as asleep
5 | from urllib.parse import quote, urlparse
6 |
7 | from bs4 import BeautifulSoup
8 | from cloudscraper import create_scraper
9 | from curl_cffi.requests import Session as cSession
10 | from requests import Session, get as rget
11 | from aiohttp import ClientSession
12 |
13 | from FZBypass import Config
14 | from FZBypass.core.exceptions import DDLException
15 | from FZBypass.core.recaptcha import recaptchaV3
16 |
17 | async def get_readable_time(seconds):
18 | minutes, seconds = divmod(seconds, 60)
19 | hours, minutes = divmod(minutes, 60)
20 | return f"{hours}h{minutes}m{seconds}s"
21 |
22 |
23 | async def yandex_disk(url: str) -> str:
24 | cget = create_scraper().request
25 | try:
26 | return cget(
27 | "get",
28 | f"https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key={url}",
29 | ).json()["href"]
30 | except KeyError:
31 | raise DDLException("File not Found / Download Limit Exceeded")
32 |
33 |
34 | async def mediafire(url: str):
35 | if final_link := findall(
36 | r"https?:\/\/download\d+\.mediafire\.com\/\S+\/\S+\/\S+", url
37 | ):
38 | return final_link[0]
39 | cget = create_scraper().request
40 | try:
41 | url = cget("get", url).url
42 | page = cget("get", url).text
43 | except Exception as e:
44 | raise DDLException(f"{e.__class__.__name__}")
45 | if final_link := findall(
46 | r"\'(https?:\/\/download\d+\.mediafire\.com\/\S+\/\S+\/\S+)\'", page
47 | ):
48 | return final_link[0]
49 | elif temp_link := findall(
50 | r'\/\/(www\.mediafire\.com\/file\/\S+\/\S+\/file\?\S+)', page
51 | ):
52 | return await mediafire("https://"+temp_link[0].strip('"'))
53 | else:
54 | raise DDLException("No links found in this page")
55 |
56 |
57 | async def shrdsk(url: str) -> str:
58 | cget = create_scraper().request
59 | try:
60 | url = cget("GET", url).url
61 | res = cget(
62 | "GET",
63 | f'https://us-central1-affiliate2apk.cloudfunctions.net/get_data?shortid={url.split("/")[-1]}',
64 | )
65 | except Exception as e:
66 | raise DDLException(f"{e.__class__.__name__}")
67 | if res.status_code != 200:
68 | raise DDLException(f"Status Code {res.status_code}")
69 | res = res.json()
70 | if "type" in res and res["type"].lower() == "upload" and "video_url" in res:
71 | return quote(res["video_url"], safe=":/")
72 | raise DDLException("No Direct Link Found")
73 |
74 |
75 | async def terabox(url: str) -> str:
76 | sess = Session()
77 |
78 | def retryme(url):
79 | while True:
80 | try:
81 | return sess.get(url)
82 | except:
83 | pass
84 |
85 | url = retryme(url).url
86 | key = url.split("?surl=")[-1]
87 | url = f"http://www.terabox.com/wap/share/filelist?surl={key}"
88 | sess.cookies.update({"ndus": Config.TERA_COOKIE})
89 |
90 | res = retryme(url)
91 | key = res.url.split("?surl=")[-1]
92 | soup = BeautifulSoup(res.content, "lxml")
93 | jsToken = None
94 |
95 | for fs in soup.find_all("script"):
96 | fstring = fs.string
97 | if fstring and fstring.startswith("try {eval(decodeURIComponent"):
98 | jsToken = fstring.split("%22")[1]
99 |
100 | res = retryme(
101 | f"https://www.terabox.com/share/list?app_id=250528&jsToken={jsToken}&shorturl={key}&root=1"
102 | )
103 | result = res.json()
104 | if result["errno"] != 0:
105 |         raise DDLException(f"{result['errmsg']} - Check cookies")
106 | result = result["list"]
107 | if len(result) > 1:
108 |         raise DDLException("Can't download multiple files")
109 | result = result[0]
110 |
111 | if result["isdir"] != "0":
112 | raise DDLException("Can't download folder")
113 | try:
114 | return result["dlink"]
115 | except:
116 | raise DDLException("Link Extraction Failed")
117 |
118 | async def try2link(url: str) -> str:
119 | DOMAIN = 'https://try2link.com'
120 | code = url.split('/')[-1]
121 |
122 | async with ClientSession() as session:
123 |         referers = ['https://hightrip.net/', 'https://to-travel.net/', 'https://world2our.com/']
124 | for referer in referers:
125 | async with session.get(f'{DOMAIN}/{code}', headers={"Referer": referer}) as res:
126 | if res.status == 200:
127 | html = await res.text()
128 | break
129 | soup = BeautifulSoup(html, "html.parser")
130 | inputs = soup.find(id="go-link").find_all(name="input")
131 | data = { input.get('name'): input.get('value') for input in inputs }
132 | await asleep(6)
133 | async with session.post(f"{DOMAIN}/links/go", data=data, headers={ "X-Requested-With": "XMLHttpRequest" }) as resp:
134 |             if 'application/json' in resp.headers.get('Content-Type', ''):
135 | json_data = await resp.json()
136 | try:
137 | return json_data['url']
138 | except:
139 | raise DDLException("Link Extraction Failed")
140 |
141 |
142 | async def gyanilinks(url: str) -> str:
143 | '''
144 | Based on https://github.com/whitedemon938/Bypass-Scripts
145 | '''
146 | code = url.split('/')[-1]
147 | useragent = "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Mobile Safari/537.36"
148 | DOMAIN = "https://go.bloggingaro.com"
149 |
150 | async with ClientSession() as session:
151 | async with session.get(f"{DOMAIN}/{code}", headers={'Referer':'https://tech.hipsonyc.com/','User-Agent': useragent}) as res:
152 | cookies = res.cookies
153 | html = await res.text()
154 | async with session.get(f"{DOMAIN}/{code}", headers={'Referer':'https://hipsonyc.com/','User-Agent': useragent}, cookies=cookies) as resp:
155 | html = await resp.text()
156 | soup = BeautifulSoup(html, 'html.parser')
157 | data = {inp.get('name'): inp.get('value') for inp in soup.find_all('input')}
158 | await asleep(5)
159 | async with session.post(f"{DOMAIN}/links/go", data=data, headers={'X-Requested-With':'XMLHttpRequest','User-Agent': useragent, 'Referer': f"{DOMAIN}/{code}"}, cookies=cookies) as links:
160 |                     if 'application/json' in links.headers.get('Content-Type', ''):
161 | try:
162 | return (await links.json())['url']
163 | except Exception:
164 | raise DDLException("Link Extraction Failed")
165 |
166 |
167 | async def ouo(url: str):
168 | tempurl = url.replace("ouo.io", "ouo.press")
169 | p = urlparse(tempurl)
170 | id = tempurl.split("/")[-1]
171 | client = cSession(
172 | headers={
173 | "authority": "ouo.press",
174 | "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
175 | "accept-language": "en-GB,en-US;q=0.9,en;q=0.8",
176 | "cache-control": "max-age=0",
177 | "referer": "http://www.google.com/ig/adde?moduleurl=",
178 | "upgrade-insecure-requests": "1",
179 | }
180 | )
181 | res = client.get(tempurl, impersonate="chrome110")
182 | next_url = f"{p.scheme}://{p.hostname}/go/{id}"
183 |
184 | for _ in range(2):
185 | if res.headers.get("Location"):
186 | break
187 | soup = BeautifulSoup(res.content, "lxml")
188 | inputs = soup.form.find_all("input", {"name": compile(r"token$")})
189 | data = {inp.get("name"): inp.get("value") for inp in inputs}
190 | data["x-token"] = await recaptchaV3()
191 | res = client.post(
192 | next_url,
193 | data=data,
194 | headers={"content-type": "application/x-www-form-urlencoded"},
195 | allow_redirects=False,
196 | impersonate="chrome110",
197 | )
198 | next_url = f"{p.scheme}://{p.hostname}/xreallcygo/{link_id}"
199 |
200 | return res.headers.get("Location")
201 |
202 |
203 | async def mdisk(url: str) -> str:
204 | """
205 | Deprecated (code preserved)
206 | """
207 | header = {
208 | "Accept": "*/*",
209 | "Accept-Language": "en-US,en;q=0.5",
210 | "Accept-Encoding": "gzip, deflate, br",
211 | "Referer": "https://mdisk.me/",
212 | "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36",
213 | }
214 | URL = f'https://diskuploader.entertainvideo.com/v1/file/cdnurl?param={url.rstrip("/").split("/")[-1]}'
215 | res = rget(url=URL, headers=header).json()
216 | return res["download"] + "\n\n" + res["source"]
217 |
218 |
219 | async def transcript(url: str, DOMAIN: str, ref: str, sltime) -> str:
220 | code = url.rstrip("/").split("/")[-1]
221 | useragent = 'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Mobile Safari/537.36'
222 |
223 | async with ClientSession() as session:
224 | async with session.get(f"{DOMAIN}/{code}", headers={'Referer': ref, 'User-Agent': useragent}) as res:
225 | html = await res.text()
226 | cookies = res.cookies
227 | soup = BeautifulSoup(html, "html.parser")
228 | title_tag = soup.find('title')
229 | if title_tag and title_tag.text == 'Just a moment...':
230 | return "Unable To Bypass Due To Cloudflare Protected"
231 | else:
232 | data = {inp.get('name'): inp.get('value') for inp in soup.find_all('input') if inp.get('name') and inp.get('value')}
233 | await asleep(sltime)
234 | async with session.post(f"{DOMAIN}/links/go", data=data, headers={'Referer': f"{DOMAIN}/{code}", 'X-Requested-With':'XMLHttpRequest', 'User-Agent': useragent}, cookies=cookies) as resp:
235 | try:
236 | if 'application/json' in resp.headers.get('Content-Type', ''):
237 | return (await resp.json())['url']
238 | except Exception:
239 | raise DDLException("Link Extraction Failed")
240 |
241 |
242 | async def justpaste(url: str):
243 | resp = rget(url, verify=False)
244 | soup = BeautifulSoup(resp.text, "html.parser")
245 | inps = soup.select('div[id="articleContent"] > p')
246 | return ", ".join(elem.string for elem in inps)
247 |
248 |
249 | async def linksxyz(url: str):
250 | resp = rget(url)
251 | soup = BeautifulSoup(resp.text, "html.parser")
252 | inps = soup.select('div[id="redirect-info"] > a')
253 | return inps[0]["href"]
254 |
255 |
256 | async def shareus(url: str) -> str:
257 | DOMAIN = "https://api.shrslink.xyz"
258 | code = url.split('/')[-1]
259 | headers = {
260 | 'User-Agent':'Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Mobile Safari/537.36',
261 | 'Origin':'https://shareus.io',
262 | }
263 | api = f"{DOMAIN}/v?shortid={code}&initial=true&referrer="
264 | sid = rget(api, headers=headers).json()['sid']
265 | if sid:
266 | api_2 = f"{DOMAIN}/get_link?sid={sid}"
267 | res = rget(api_2, headers=headers)
268 | if res:
269 | return res.json()['link_info']['destination']
270 | else:
271 | raise DDLException("Link Extraction Failed")
272 | else:
273 | raise DDLException("ID Error")
274 |
275 |
276 | async def dropbox(url: str) -> str:
277 | return (
278 | url.replace("www.", "")
279 | .replace("dropbox.com", "dl.dropboxusercontent.com")
280 | .replace("?dl=0", "")
281 | )
282 |
283 |
284 | async def linkvertise(url: str) -> str:
285 | resp = rget("https://bypass.pm/bypass2", params={"url": url}).json()
286 | if resp["success"]:
287 | return resp["destination"]
288 | else:
289 | raise DDLException(resp["msg"])
290 |
291 |
292 | async def rslinks(url: str) -> str:
293 | resp = rget(url, stream=True, allow_redirects=False)
294 | try:
295 | code = resp.headers["location"].split("ms9")[-1]
296 | return f"http://techyproio.blogspot.com/p/short.html?{code}=="
297 | except KeyError:
298 | raise DDLException("Link Extraction Failed")
299 |
300 |
301 | async def shorter(url: str) -> str:
302 | try:
303 | cget = create_scraper().request
304 | resp = cget("GET", url, allow_redirects=False)
305 | return resp.headers["Location"]
306 | except Exception:
307 | raise DDLException("Link Extraction Failed")
308 |
309 |
310 | async def appurl(url: str):
311 | cget = create_scraper().request
312 | resp = cget("GET", url, allow_redirects=False)
313 | soup = BeautifulSoup(resp.text, "html.parser")
314 | return soup.select('meta[property="og:url"]')[0]["content"]
315 |
316 |
317 | async def surl(url: str):
318 | cget = create_scraper().request
319 | resp = cget("GET", f"{url}+")
320 | soup = BeautifulSoup(resp.text, "html.parser")
321 | return soup.select('p[class="long-url"]')[0].string.split()[1]
322 |
323 |
324 | async def thinfi(url: str) -> str:
325 | try:
326 | return BeautifulSoup(rget(url).content, "html.parser").p.a.get("href")
327 | except Exception:
328 | raise DDLException("Link Extraction Failed")
329 |
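330 | # Usage sketch (illustrative only; "https://try2link.com/example" is a
331 | # placeholder link, and network access is assumed). Each helper above
332 | # resolves one supported service and raises DDLException on failure:
333 | #
334 | # from asyncio import run
335 | # from FZBypass.core.bypass_ddl import try2link
336 | # print(run(try2link("https://try2link.com/example")))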
--------------------------------------------------------------------------------
/FZBypass/core/bypass_dlinks.py:
--------------------------------------------------------------------------------
1 | from base64 import b64decode
2 | from asyncio import create_task, gather
3 | from re import findall, DOTALL
4 | from urllib.parse import urlparse
5 | from uuid import uuid4
6 |
7 | from bs4 import BeautifulSoup
8 | from cloudscraper import create_scraper
9 | from lxml import etree
10 | from requests import Session
11 | from aiohttp import ClientSession
12 |
13 | from FZBypass import LOGGER, Config
14 | from FZBypass.core.bot_utils import get_dl
15 | from FZBypass.core.exceptions import DDLException
16 |
17 |
18 | async def filepress(url: str):
19 | cget = create_scraper().request
20 | try:
21 | url = cget("GET", url).url
22 | raw = urlparse(url)
23 | async with ClientSession() as sess:
24 | json_data = {
25 | "id": raw.path.split("/")[-1],
26 | "method": "publicDownlaod",
27 | }
28 | # async with await sess.post(f'{raw.scheme}://{raw.hostname}/api/file/downlaod/', headers={'Referer': f'{raw.scheme}://{raw.hostname}'}, json=json_data) as resp:
29 | # d_id = await resp.json()
30 | # if d_id.get('data', False):
31 | # dl_link = f"https://drive.google.com/uc?id={d_id['data']}&export=download"
32 | # parsed = BeautifulSoup(cget('GET', dl_link).content, 'html.parser').find('span')
33 | # combined = str(parsed).rsplit('(', maxsplit=1)
34 | # name, size = combined[0], combined[1].replace(')', '') + 'B'
35 | # else:
36 | # dl_link = "Unavailable" if d_id["statusText"] == "Bad Request" else d_id["statusText"]
37 | # name, size = "N/A", "N/A"
38 | del json_data["method"]
39 | async with await sess.post(
40 | f"{raw.scheme}://{raw.hostname}/api/file/telegram/downlaod/",
41 | headers={"Referer": f"{raw.scheme}://{raw.hostname}"},
42 | json=json_data,
43 | ) as resp:
44 | tg_id = await resp.json()
45 | if tg_id.get("data", False):
46 | t_url = f"https://tghub.xyz/?start={tg_id['data']}"
47 | bot_name = findall(
48 | "filepress_[a-zA-Z0-9]+_bot", cget("GET", t_url).text
49 | )[0]
50 | tg_link = f"https://t.me/{bot_name}/?start={tg_id['data']}"
51 | else:
52 | tg_link = (
53 | "Unavailable"
54 | if tg_id["statusText"] == "Ok"
55 | else tg_id["statusText"]
56 | )
57 | except Exception as e:
58 | raise DDLException(f"{e.__class__.__name__}")
59 | if tg_link == "Unavailable":
60 | tg_link_text = "Unavailable"
61 | else:
62 | tg_link_text = f'<a href="{tg_link}">Click Here</a>'
63 |
64 | parse_txt = f"""┏FilePress: Click Here
65 | ┗Telegram: {tg_link_text}"""
66 | # if "drive.google.com" in dl_link and Config.DIRECT_INDEX:
67 | # parse_txt += f"┠Temp Index: <a href='{get_dl(dl_link)}'>Click Here</a>\n"
68 | # parse_txt += f"┗GDrive: <a href='{dl_link}'>Click Here</a>"
69 | return parse_txt
70 |
71 |
72 | async def gdtot(url):
73 | cget = create_scraper().request
74 | try:
75 | url = cget("GET", url).url
76 | p_url = urlparse(url)
77 | res = cget(
78 | "POST",
79 | f"{p_url.scheme}://{p_url.hostname}/ddl",
80 | data={"dl": str(url.split("/")[-1])},
81 | )
82 | except Exception as e:
83 | raise DDLException(f"{e.__class__.__name__}")
84 | if (
85 | drive_link := findall(r"myDl\('(.*?)'\)", res.text)
86 | ) and "drive.google.com" in drive_link[0]:
87 | d_link = drive_link[0]
88 | elif Config.GDTOT_CRYPT:
89 | cget("GET", url, cookies={"crypt": Config.GDTOT_CRYPT})
90 | p_url = urlparse(url)
91 | js_script = cget(
92 | "POST",
93 | f"{p_url.scheme}://{p_url.hostname}/dld",
94 | data={"dwnld": url.split("/")[-1]},
95 | )
96 | g_id = findall("gd=(.*?)&", js_script.text)
97 | try:
98 | decoded_id = b64decode(str(g_id[0])).decode("utf-8")
99 | except Exception:
100 | raise DDLException(
101 | "Try in your browser, mostly file not found or user limit exceeded!"
102 | )
103 | d_link = f"https://drive.google.com/open?id={decoded_id}"
104 | else:
105 | raise DDLException(
106 | "Drive Link not found, Try in your broswer! GDTOT_CRYPT not Provided!"
107 | )
108 | soup = BeautifulSoup(cget("GET", url).content, "html.parser")
109 | parse_data = (
110 | (soup.select('meta[property^="og:description"]')[0]["content"])
111 | .replace("Download ", "")
112 | .rsplit("-", maxsplit=1)
113 | )
114 | parse_txt = f"""┏Name: {parse_data[0]}
115 | ┠Size: {parse_data[-1]}
116 | ┠GDToT: <a href='{url}'>Click Here</a>
117 | """
118 | if Config.DIRECT_INDEX:
119 | parse_txt += f"┠Temp Index: Click Here\n"
120 | parse_txt += f"┗GDrive: Click Here"
121 | return parse_txt
122 |
123 |
124 | async def drivescript(url, crypt, dtype):
125 | rs = Session()
126 | resp = rs.get(url)
127 | title = findall(r">(.*?)<\/h4>", resp.text)[0]
128 | size = findall(r">(.*?)<\/td>", resp.text)[1]
129 | p_url = urlparse(url)
130 |
131 | dlink = ""
132 | if dtype != "DriveFire":
133 | try:
134 | js_query = rs.post(
135 | f"{p_url.scheme}://{p_url.hostname}/ajax.php?ajax=direct-download",
136 | data={"id": str(url.split("/")[-1])},
137 | headers={"x-requested-with": "XMLHttpRequest"},
138 | ).json()
139 | if str(js_query["code"]) == "200":
140 | dlink = f"{p_url.scheme}://{p_url.hostname}{js_query['file']}"
141 | except Exception as e:
142 | LOGGER.error(e)
143 |
144 | if not dlink and crypt:
145 | rs.get(url, cookies={"crypt": crypt})
146 | try:
147 | js_query = rs.post(
148 | f"{p_url.scheme}://{p_url.hostname}/ajax.php?ajax=download",
149 | data={"id": str(url.split("/")[-1])},
150 | headers={"x-requested-with": "XMLHttpRequest"},
151 | ).json()
152 | except Exception as e:
153 | raise DDLException(f"{e.__class__.__name__}")
154 | if str(js_query["code"]) == "200":
155 | dlink = f"{p_url.scheme}://{p_url.hostname}{js_query['file']}"
156 |
157 | if dlink:
158 | res = rs.get(dlink)
159 | soup = BeautifulSoup(res.text, "html.parser")
160 | gd_data = soup.select('a[class="btn btn-primary btn-user"]')
161 | parse_txt = f"""┏Name: {title}
162 | ┠Size: {size}
163 | ┠{dtype}: <a href='{url}'>Click Here</a>"""
164 | if dtype == "HubDrive":
165 | parse_txt += (
166 | f"""\n┠Instant: Click Here"""
167 | )
168 | if (d_link := gd_data[0]["href"]) and Config.DIRECT_INDEX:
169 | parse_txt += (
170 | f"\n┠Temp Index: Click Here"
171 | )
172 | parse_txt += f"\n┗GDrive: Click Here"
173 | return parse_txt
174 | elif not dlink and not crypt:
175 | raise DDLException(
176 | f"{dtype} Crypt Not Provided and Direct Link Generate Failed"
177 | )
178 | else:
179 | raise DDLException(f'{js_query["file"]}')
180 |
181 |
182 | async def appflix(url):
183 | async def appflix_single(url):
184 | cget = create_scraper().request
185 | url = cget("GET", url).url
186 | soup = BeautifulSoup(
187 | cget("GET", url, allow_redirects=False).text, "html.parser"
188 | )
189 | ss = soup.select("li[class^='list-group-item']")
190 | dbotv2 = (
191 | dbot[0]["href"]
192 | if "gdflix" in url and (dbot := soup.select("a[href*='drivebot.lol']"))
193 | else None
194 | )
195 | try:
196 | d_link = await sharer_scraper(url)
197 | except Exception as e:
198 | if not dbotv2:
199 | raise DDLException(e)
200 | else:
201 | d_link = str(e)
202 | parse_txt = f"""┏Name: {ss[0].string.split(":")[1]}
203 | ┠Size: {ss[2].string.split(":")[1]}
204 | ┠Source: {url}"""
205 | if dbotv2:
206 | parse_txt += f"\n┠DriveBot V2: Click Here"
207 | if d_link and Config.DIRECT_INDEX:
208 | parse_txt += (
209 | f"\n┠Temp Index: Click Here"
210 | )
211 | parse_txt += f"\n┗GDrive: Click Here"
212 | return parse_txt
213 |
214 | if "/pack/" in url:
215 | cget = create_scraper().request
216 | url = cget("GET", url).url
217 | soup = BeautifulSoup(cget("GET", url).content, "html.parser")
218 | p_url = urlparse(url)
219 | body = ""
220 | atasks = [
221 | create_task(
222 | appflix_single(f"{p_url.scheme}://{p_url.hostname}" + ss["href"])
223 | )
224 | for ss in soup.select("a[href^='/file/']")
225 | ]
226 | completed_tasks = await gather(*atasks, return_exceptions=True)
227 | for bp_link in completed_tasks:
228 | if isinstance(bp_link, Exception):
229 | body += "\n\n" + f"Error: {bp_link}"
230 | else:
231 | body += "\n\n" + bp_link
232 | return f"""┏Name: {soup.title.string}
233 | ┗Source: {url}{body}"""
234 | return await appflix_single(url)
235 |
236 |
237 | async def sharerpw(url: str, force=False):
238 | if not (Config.XSRF_TOKEN and Config.LARAVEL_SESSION):
239 | raise DDLException("XSRF_TOKEN or LARAVEL_SESSION not Provided!")
240 | cget = create_scraper(allow_brotli=False).request
241 | resp = cget(
242 | "GET",
243 | url,
244 | cookies={
245 | "XSRF-TOKEN": Config.XSRF_TOKEN,
246 | "laravel_session": Config.LARAVEL_SESSION,
247 | },
248 | )
249 | parse_txt = findall(r">(.*?)</td>", resp.text)
250 | ddl_btn = etree.HTML(resp.content).xpath("//button[@id='btndirect']")
251 | token = findall(r"_token\s=\s'(.*?)'", resp.text, DOTALL)[0]
252 | data = {"_token": token}
253 | if not force:
254 | data["nl"] = 1
255 | headers = {
256 | "content-type": "application/x-www-form-urlencoded; charset=UTF-8",
257 | "x-requested-with": "XMLHttpRequest",
258 | }
259 | try:
260 | res = cget("POST", url + "/dl", headers=headers, data=data).json()
261 | except Exception as e:
262 | raise DDLException(str(e))
263 | parse_data = f"""┏Name: {parse_txt[2]}
264 | ┠Size: {parse_txt[8]}
265 | ┠Added On: {parse_txt[11]}
266 | """
267 | if res["status"] == 0:
268 | if Config.DIRECT_INDEX:
269 | parse_data += (
270 | f"\n┠Temp Index: Click Here"
271 | )
272 | return parse_data + f"\n┗GDrive: <a href='{res['url']}'>Click Here</a>"
273 | elif res["status"] == 2:
274 | msg = res["message"].replace("<br/>", "\n")
275 | return parse_data + f"\n┗Error: {msg}"
276 | if len(ddl_btn) and not force:
277 | return await sharerpw(url, force=True)
278 |
279 |
280 | async def sharer_scraper(url):
281 | cget = create_scraper().request
282 | try:
283 | url = cget("GET", url).url
284 | raw = urlparse(url)
285 | header = {
286 | "useragent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/7.0.548.0 Safari/534.10"
287 | }
288 | res = cget("GET", url, headers=header)
289 | except Exception as e:
290 | raise DDLException(f"{e.__class__.__name__}")
291 | key = findall(r'"key",\s+"(.*?)"', res.text)
292 | if not key:
293 | raise DDLException("Download Link Key not found!")
294 | key = key[0]
295 | if not etree.HTML(res.content).xpath("//button[@id='drc']"):
296 | raise DDLException("Link don't have direct download button")
297 | boundary = uuid4()
298 | headers = {
299 | "Content-Type": f"multipart/form-data; boundary=----WebKitFormBoundary{boundary}",
300 | "x-token": raw.hostname,
301 | "useragent": "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/7.0.548.0 Safari/534.10",
302 | }
303 |
304 | data = (
305 | f'------WebKitFormBoundary{boundary}\r\nContent-Disposition: form-data; name="action"\r\n\r\ndirect\r\n'
306 | f'------WebKitFormBoundary{boundary}\r\nContent-Disposition: form-data; name="key"\r\n\r\n{key}\r\n'
307 | f'------WebKitFormBoundary{boundary}\r\nContent-Disposition: form-data; name="action_token"\r\n\r\n\r\n'
308 | f"------WebKitFormBoundary{boundary}--\r\n"
309 | )
310 | try:
311 | res = cget("POST", url, cookies=res.cookies, headers=headers, data=data).json()
312 | except Exception as e:
313 | raise DDLException(f"{e.__class__.__name__}")
314 | if "url" not in res:
315 | raise DDLException("Drive Link not found, Try in your browser")
316 | if "drive.google.com" in res["url"]:
317 | return res["url"]
318 | try:
319 | res = cget("GET", res["url"])
320 | except Exception as e:
321 | raise DDLException(f"ERROR: {e.__class__.__name__}")
322 | if (
323 | drive_link := etree.HTML(res.content).xpath("//a[contains(@class,'btn')]/@href")
324 | ) and "drive.google.com" in drive_link[0]:
325 | return drive_link[0]
326 | else:
327 | raise DDLException("Drive Link not found, Try in your browser")
328 |
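329 | # Usage sketch (illustrative only; the file URL below is a placeholder).
330 | # These helpers return preformatted HTML summaries (Name/Size plus
331 | # "Click Here" anchors) rather than bare links, and rely on the
332 | # crypt/cookie values from Config where noted above:
333 | #
334 | # from asyncio import run
335 | # from FZBypass.core.bypass_dlinks import gdtot
336 | # print(run(gdtot("https://gdtot.example/file/0000000")))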
--------------------------------------------------------------------------------
/FZBypass/core/bypass_checker.py:
--------------------------------------------------------------------------------
1 | from re import match
2 | from urllib.parse import urlparse
3 |
4 | from FZBypass.core.bypass_dlinks import *
5 | from FZBypass.core.bypass_ddl import *
6 | from FZBypass.core.bypass_scrape import *
7 | from FZBypass.core.bot_utils import get_dl
8 | from FZBypass.core.exceptions import DDLException
9 |
10 | fmed_list = [
11 | "fembed.net",
12 | "fembed.com",
13 | "femax20.com",
14 | "fcdn.stream",
15 | "feurl.com",
16 | "layarkacaxxi.icu",
17 | "naniplay.nanime.in",
18 | "naniplay.nanime.biz",
19 | "naniplay.com",
20 | "mm9842.com",
21 | ]
22 |
23 |
24 | def is_share_link(url):
25 | return bool(
26 | match(
27 | r"https?:\/\/.+\.(gdtot|filepress|pressbee|gdflix)\.\S+|https?:\/\/(gdflix|filepress|pressbee|onlystream|filebee|appdrive)\.\S+",
28 | url,
29 | )
30 | )
31 |
32 |
33 | def is_excep_link(url):
34 | return bool(
35 | match(
36 | r"https?:\/\/.+\.(1tamilmv|gdtot|filepress|pressbee|gdflix|sharespark)\.\S+|https?:\/\/(sharer|onlystream|hubdrive|katdrive|drivefire|skymovieshd|toonworld4all|kayoanime|cinevood|gdflix|filepress|pressbee|filebee|appdrive)\.\S+",
37 | url,
38 | )
39 | )
40 |
41 |
42 | async def direct_link_checker(link, onlylink=False):
43 | domain = urlparse(link).hostname
44 |
45 | # File Hoster Links
46 | if bool(match(r"https?:\/\/(yadi|disk.yandex)\.\S+", link)):
47 | return await yandex_disk(link)
48 | elif bool(match(r"https?:\/\/.+\.mediafire\.\S+", link)):
49 | return await mediafire(link)
50 | elif bool(match(r"https?:\/\/shrdsk\.\S+", link)):
51 | return await shrdsk(link)
52 | elif any(
53 | x in domain
54 | for x in [
55 | "1024tera",
56 | "terabox",
57 | "nephobox",
58 | "4funbox",
59 | "mirrobox",
60 | "momerybox",
61 | "teraboxapp",
62 | ]
63 | ):
64 | return await terabox(link)
65 | elif "drive.google.com" in link:
66 | return get_dl(link, True)
67 |
68 | # DDL Links
69 | elif bool(match(r"https?:\/\/try2link\.\S+", link)):
70 | blink = await try2link(link)
71 | elif bool(match(r"https?:\/\/(gyanilinks|gtlinks)\.\S+", link)):
72 | blink = await gyanilinks(link)
73 |
74 | elif bool(match(r"https?:\/\/adrinolinks\.\S+", link)):
75 | blink = await transcript(
76 | link, "https://adrinolinks.in", "https://bhojpuritop.in/", 8
77 | )
78 | elif bool(match(r"https?:\/\/adsfly\.\S+", link)):
79 | blink = await transcript(
80 | link, "https://go.adsfly.in/", "https://letest25.co/", 3
81 | )
82 | elif bool(match(r"https?:\/\/(.+\.)?anlinks\.\S+", link)):
83 | blink = await transcript(
84 | link, "https://anlinks.in/", "https://dsblogs.fun/", 8
85 | )
86 |
87 | elif bool(match(r"https?:\/\/ronylink\.\S+", link)):
88 | blink = await transcript(
89 | link, "https://go.ronylink.com/", "https://livejankari.com/", 3
90 | )
91 |
92 | elif bool(match(r"https?:\/\/.+\.evolinks\.\S+", link)):
93 | blink = await transcript(
94 | link, "https://ads.evolinks.in/" , link, 3
95 | )
96 | elif bool(match(r"https?:\/\/.+\.tnshort\.\S+", link)):
97 | blink = await transcript(
98 | link, "https://news.sagenews.in/", "https://movies.djnonstopmusic.in/", 5
99 | )
100 | elif bool(match(r"https?:\/\/(xpshort|push.bdnewsx|techymozo)\.\S+", link)):
101 | blink = await transcript(
102 | link, "https://xpshort.com/", "https://www.comptegratuite.com/", 4.9
103 | )
104 | elif bool(match(r"https?:\/\/go.lolshort\.\S+", link)):
105 | blink = await transcript(
106 | link, "https://get.lolshort.tech/", "https://tech.animezia.com/", 8
107 | )
108 | elif bool(match(r"https?:\/\/onepagelink\.\S+", link)):
109 | blink = await transcript(
110 | link, "https://go.onepagelink.in/", "https://gorating.in/", 3.1
111 | )
112 | elif bool(match(r"https?:\/\/earn.moneykamalo\.\S+", link)):
113 | blink = await transcript(
114 | link, "https://go.moneykamalo.com/", "https://bloging.techkeshri.com/", 4
115 | )
116 | elif bool(match(r"https?:\/\/droplink\.\S+", link)):
117 | blink = await transcript(
118 | link, "https://droplink.co/", "https://yoshare.net/", 3.1
119 | )
120 | elif bool(match(r"https?:\/\/tinyfy\.\S+", link)):
121 | blink = await transcript(
122 | link, "https://tinyfy.in", "https://www.yotrickslog.tech/", 0
123 | )
124 | elif bool(match(r"https?:\/\/krownlinks\.\S+", link)):
125 | blink = await transcript(
126 | link, "https://go.hostadviser.net/", "blog.hostadviser.net/", 8
127 | )
128 | elif bool(match(r"https?:\/\/(du-link|dulink)\.\S+", link)):
129 | blink = await transcript(
130 | link, "https://du-link.in", "https://profitshort.com/", 0
131 | )
132 | elif bool(match(r"https?:\/\/indianshortner\.\S+", link)):
133 | blink = await transcript(
134 | link, "https://indianshortner.com/", "https://moddingzone.in/", 5
135 | )
136 | elif bool(match(r"https?:\/\/m.easysky\.\S+", link)):
137 | blink = await transcript(
138 | link, "https://techy.veganab.co/", "https://camdigest.com/", 5
139 | )
140 | elif bool(match(r"https?:\/\/(.+\.)?linkbnao\.\S+", link)):
141 | blink = await transcript(
142 | link, "https://vip.linkbnao.com", "https://ffworld.xyz/", 2)
143 | elif bool(match(r"https?:\/\/.+\.tnlink\.\S+", link)):
144 | blink = await transcript(
145 | link, "https://news.sagenews.in/", "https://knowstuff.in/", 5
146 | )
147 | elif bool(match(r"https?:\/\/link4earn\.\S+", link)):
148 | blink = await transcript(
149 | link, "https://link4earn.com", "https://studyis.xyz/", 6
150 | )
151 | elif bool(match(r"https?:\/\/shortingly\.\S+", link)):
152 | blink = await transcript(
153 | link, "https://go.blogytube.com/", "https://blogytube.com/", 5
154 | )
155 | elif bool(match(r"https?:\/\/short2url\.\S+", link)):
156 | blink = await transcript(
157 | link, "https://techyuth.xyz/blog", "https://blog.coin2pay.xyz/", 10
158 | )
159 | elif bool(match(r"https?:\/\/urlsopen\.\S+", link)):
160 | blink = await transcript(
161 | link, "https://s.humanssurvival.com/", "https://1topjob.xyz/", 5
162 | )
163 | elif bool(match(r"https?:\/\/mdisk\.\S+", link)):
164 | blink = await transcript(
165 | link, "https://mdisk.pro", "https://www.meclipstudy.in/", 5
166 | )
167 | elif bool(match(r"https?:\/\/(pkin|go.paisakamalo)\.\S+", link)):
168 | blink = await transcript(
169 | link, "https://go.paisakamalo.in", "https://healthtips.techkeshri.com/", 5
170 | )
171 | elif bool(match(r"https?:\/\/linkpays\.\S+", link)):
172 | blink = await transcript(
173 | link, "https://tech.smallinfo.in/Gadget/", "https://finance.filmypoints.in/", 6
174 | )
175 | elif bool(match(r"https?:\/\/sklinks\.\S+", link)):
176 | blink = await transcript(
177 | link, "https://sklinks.in", "https://dailynew.online/", 5
178 | )
179 | elif bool(match(r"https?:\/\/link1s\.\S+", link)):
180 | blink = await transcript(
181 | link, "https://link1s.com", "https://anhdep24.com/", 9
182 | )
183 | elif bool(match(r"https?:\/\/tulinks\.\S+", link)):
184 | blink = await transcript(
185 | link, "https://tulinks.one", "https://www.blogger.com/", 8
186 | )
187 | elif bool(match(r"https?:\/\/.+\.tulinks\.\S+", link)):
188 | blink = await transcript(
189 | link, "https://go.tulinks.online", "https://tutelugu.co/", 8
190 | )
191 | elif bool(match(r"https?:\/\/(.+\.)?vipurl\.\S+", link)):
192 | blink = await transcript(
193 | link, "https://count.vipurl.in/", "https://kiss6kartu.in/", 5
194 | )
195 | elif bool(match(r"https?:\/\/indyshare\.\S+", link)):
196 | blink = await transcript(
197 | link, "https://indyshare.net", "https://insurancewolrd.in/", 3.1
198 | )
199 | elif bool(match(r"https?:\/\/linkyearn\.\S+", link)):
200 | blink = await transcript(
201 | link, "https://linkyearn.com", "https://gktech.uk/", 5
202 | )
203 | elif bool(match(r"https?:\/\/earn4link\.\S+", link)):
204 | blink = await transcript(
205 | link, "https://m.open2get.in/", "https://ezeviral.com/", 8
206 | )
207 | elif bool(match(r"https?:\/\/linksly\.\S+", link)):
208 | blink = await transcript(
209 | link, "https://go.linksly.co/", "https://en.themezon.net/", 5
210 | )
211 | elif bool(match(r"https?:\/\/(.+\.)?mdiskshortner\.\S+", link)):
212 | blink = await transcript(
213 | link, "https://mdiskshortner.link", "https://yosite.net/", 0
214 | )
215 | elif bool(match(r"https?://(?:\w+\.)?rocklinks\.\S+", link)):
216 | blink = await transcript(
217 | link, "https://land.povathemes.com/", "https://blog.disheye.com/", 4.9
218 | )
219 | elif bool(match(r"https?:\/\/mplaylink\.\S+", link)):
220 | blink = await transcript(
221 | link, "https://tera-box.cloud/", "https://mvplaylink.in.net/", 5
222 | )
223 | elif bool(match(r"https?:\/\/shrinke\.\S+", link)):
224 | blink = await transcript(
225 | link, "https://en.shrinke.me/", "https://themezon.net/", 15
226 | )
227 | elif bool(match(r"https?:\/\/urlspay\.\S+", link)):
228 | blink = await transcript(
229 | link, "https://finance.smallinfo.in/", "https://tech.filmypoints.in/", 5
230 | )
231 | elif bool(match(r"https?:\/\/.+\.tnvalue\.\S+", link)):
232 | blink = await transcript(
233 | link, "https://page.finclub.in/", "https://finclub.in/", 8
234 | )
235 | elif bool(match(r"https?:\/\/sxslink\.\S+", link)):
236 | blink = await transcript(
237 | link, "https://getlink.sxslink.com/", "https://cinemapettai.in/", 5
238 | )
239 |
240 | elif bool(match(r"https?:\/\/moneycase\.\S+", link)):
241 | blink = await transcript(
242 | link, "https://last.moneycase.link/", "https://www.infokeeda.xyz/", 3.1
243 | )
244 | elif bool(match(r"https?:\/\/urllinkshort\.\S+", link)):
245 | blink = await transcript(
246 | link, "https://web.urllinkshort.in", "https://suntechu.in/", 5
247 | )
248 | elif bool(match(r"https?:\/\/.+\.dtglinks\.\S+", link)):
249 | blink = await transcript(
250 | link, "https://happyfiles.dtglinks.in/", "https://tech.filohappy.in/", 5
251 | )
252 | elif bool(match(r"https?:\/\/v2links\.\S+", link)):
253 | blink = await transcript(
254 | link, "https://vzu.us/", "https://newsbawa.com/", 5
255 | )
256 | elif bool(match(r"https?:\/\/(.+\.)?kpslink\.\S+", link)):
257 | blink = await transcript(
258 | link, "https://kpslink.in/", "https://infotamizhan.xyz/", 3.1
259 | )
260 | elif bool(match(r"https?:\/\/v2.kpslink\.\S+", link)):
261 | blink = await transcript(
262 | link, "https://v2.kpslink.in/", "https://infotamizhan.xyz/", 5
263 | )
264 | elif bool(match(r"https?:\/\/tamizhmasters\.\S+", link)):
265 | blink = await transcript(
266 | link, "https://tamizhmasters.com/", "https://pokgames.com/", 5
267 | )
268 | elif bool(match(r"https?:\/\/tglink\.\S+", link)):
269 | blink = await transcript(
270 | link, "https://tglink.in/", "https://www.proappapk.com/", 5
271 | )
272 | elif bool(match(r"https?:\/\/pandaznetwork\.\S+", link)):
273 | blink = await transcript(
274 | link, "https://pandaznetwork.com/", "https://panda.freemodsapp.xyz/", 5
275 | )
276 | elif bool(match(r"https?:\/\/url4earn\.\S+", link)):
277 | blink = await transcript(
278 | link, "https://go.url4earn.in/", "https://techminde.com/", 8
279 | )
280 | elif bool(match(r"https?:\/\/ez4short\.\S+", link)):
281 | blink = await transcript(
282 | link, "https://ez4short.com/", "https://ez4mods.com/", 5
283 | )
284 | elif bool(match(r"https?:\/\/dalink\.\S+", link)):
285 | blink = await transcript(
286 | link, "https://get.tamilhit.tech/MR-X/tamil/", "https://www.tamilhit.tech/", 8
287 | )
288 | elif bool(match(r"https?:\/\/.+\.omnifly\.\S+", link)):
289 | blink = await transcript(
290 | link, "https://f.omnifly.in.net/", "https://ignitesmm.com/", 5
291 | )
292 | elif bool(match(r"https?:\/\/sheralinks\.\S+", link)):
293 | blink = await transcript(
294 | link, "https://sheralinks.com/", "https://blogyindia.com/", 0.8
295 | )
296 | elif bool(match(r"https?:\/\/bindaaslinks\.\S+", link)):
297 | blink = await transcript(
298 | link, "https://appsinsta.com/blog", "https://pracagov.com/", 3
299 | )
300 | elif bool(match(r"https?:\/\/viplinks\.\S+", link)):
301 | blink = await transcript(
302 | link, "https://m.vip-link.net/", "https://m.leadcricket.com/", 5
303 | )
304 | elif bool(match(r"https?:\/\/.+\.short2url\.\S+", link)):
305 | blink = await transcript(
306 | link, "https://techyuth.xyz/blog/", "https://blog.mphealth.online/", 10
307 | )
308 | elif bool(match(r"https?:\/\/shrinkforearn\.\S+", link)):
309 | blink = await transcript(
310 | link, "https://shrinkforearn.in/", "https://wp.uploadfiles.in/", 8
311 | )
312 | elif bool(match(r"https?:\/\/bringlifes\.\S+", link)):
313 | blink = await transcript(
314 | link, "https://bringlifes.com/", "https://loanoffering.in/", 5
315 | )
316 | elif bool(match(r"https?:\/\/.+\.linkfly\.\S+", link)):
317 | blink = await transcript(
318 | link, "https://insurance.yosite.net/", "https://yosite.net/", 10
319 | )
320 | elif bool(match(r"https?:\/\/.+\.earn2me\.\S+", link)):
321 | blink = await transcript(
322 | link, "https://blog.filepresident.com/", "https://easyworldbusiness.com/", 5
323 | )
324 | elif bool(match(r"https?:\/\/.+\.vplinks\.\S+", link)):
325 | blink = await transcript(
326 | link, "https://vplink.in", "https://insurance.findgptprompts.com/", 5
327 | )
328 | elif bool(match(r"https?:\/\/.+\.narzolinks\.\S+", link)):
329 | blink = await transcript(
330 | link, "https://go.narzolinks.click/", "https://hydtech.in/", 5
331 | )
332 | elif bool(match(r"https?:\/\/earn2short\.\S+", link)):
333 | blink = await transcript(
334 | link, "https://go.earn2short.in/", "https://tech.insuranceinfos.in/", 0.8
335 | )
336 | elif bool(match(r"https?:\/\/instantearn\.\S+", link)):
337 | blink = await transcript(
338 | link, "https://get.instantearn.in/", "https://love.petrainer.in/", 5
339 | )
340 | elif bool(match(r"https?:\/\/linkjust\.\S+", link)):
341 | blink = await transcript(
342 | link, "https://linkjust.com/", "https://forexrw7.com/", 3.1
343 | )
344 | elif bool(match(r"https?:\/\/pdiskshortener\.\S+", link)):
345 | blink = await transcript(
346 | link, "https://pdiskshortener.com/", "", 10
347 | )
348 | elif bool(match(r"https?:\/\/publicearn\.\S+", link)):
349 | blink = await transcript(
350 | link, "https://publicearn.com/", "https://careersides.com/", 4.9
351 | )
352 | elif bool(match(r"https?:\/\/modijiurl\.\S+", link)):
353 | blink = await transcript(
354 | link, "https://modijiurl.com/", "https://loanoffering.in/", 8
355 | )
356 | elif bool(match(r"https?:\/\/linkshortx\.\S+", link)):
357 | blink = await transcript(
358 | link, "https://linkshortx.in/", "https://nanotech.org.in/", 4.9
359 | )
360 | elif bool(match(r"https?:\/\/.+\.shorito\.\S+", link)):
361 | blink = await transcript(
362 | link, "https://go.shorito.com/", "https://healthgo.gorating.in/", 8
363 | )
364 | elif bool(match(r"https?:\/\/pdisk\.\S+", link)):
365 | blink = await transcript(
366 | link, "https://last.moneycase.link/", "https://www.webzeni.com/", 4.9
367 | )
368 | elif bool(match(r"https?:\/\/ziplinker\.\S+", link)):
369 | blink = await transcript(
370 | link, "https://ziplinker.net", "https://fintech.techweeky.com/", 1
371 | )
372 | elif bool(match(r"https?:\/\/ouo\.\S+", link)):
373 | blink = await ouo(link)
374 | elif bool(match(r"https?:\/\/(shareus|shrs)\.\S+", link)):
375 | blink = await shareus(link)
376 | elif bool(match(r"https?:\/\/(.+\.)?dropbox\.\S+", link)):
377 | blink = await dropbox(link)
378 | elif bool(match(r"https?:\/\/linkvertise\.\S+", link)):
379 | blink = await linkvertise(link)
380 | elif bool(match(r"https?:\/\/rslinks\.\S+", link)):
381 | blink = await rslinks(link)
382 | elif bool(match(r"https?:\/\/(bit|tinyurl|(.+\.)short|shorturl|t)\.\S+", link)):
383 | blink = await shorter(link)
384 | elif bool(match(r"https?:\/\/appurl\.\S+", link)):
385 | blink = await appurl(link)
386 | elif bool(match(r"https?:\/\/surl\.\S+", link)):
387 | blink = await surl(link)
388 | elif bool(match(r"https?:\/\/thinfi\.\S+", link)):
389 | blink = await thinfi(link)
390 | elif bool(match(r"https?:\/\/justpaste\.\S+", link)):
391 | blink = await justpaste(link)
392 | elif bool(match(r"https?:\/\/linksxyz\.\S+", link)):
393 | blink = await linksxyz(link)
394 |
395 | # DL Sites
396 | elif bool(match(r"https?:\/\/cinevood\.\S+", link)):
397 | return await cinevood(link)
398 | elif bool(match(r"https?:\/\/kayoanime\.\S+", link)):
399 | return await kayoanime(link)
400 | elif bool(match(r"https?:\/\/toonworld4all\.\S+", link)):
401 | return await toonworld4all(link)
402 | elif bool(match(r"https?:\/\/skymovieshd\.\S+", link)):
403 | return await skymovieshd(link)
404 | elif bool(match(r"https?:\/\/.+\.sharespark\.\S+", link)):
405 | return await sharespark(link)
406 | elif bool(match(r"https?:\/\/.+\.1tamilmv\.\S+", link)):
407 | return await tamilmv(link)
408 |
409 | # DL Links
410 | elif bool(match(r"https?:\/\/hubdrive\.\S+", link)):
411 | return await drivescript(link, Config.HUBDRIVE_CRYPT, "HubDrive")
412 | elif bool(match(r"https?:\/\/katdrive\.\S+", link)):
413 | return await drivescript(link, Config.KATDRIVE_CRYPT, "KatDrive")
414 | elif bool(match(r"https?:\/\/drivefire\.\S+", link)):
415 | return await drivescript(link, Config.DRIVEFIRE_CRYPT, "DriveFire")
416 | elif bool(match(r"https?:\/\/sharer\.\S+", link)):
417 | return await sharerpw(link)
418 | elif is_share_link(link):
419 | if "gdtot" in domain:
420 | return await gdtot(link)
421 | elif "filepress" in domain or "pressbee" in domain:
422 | return await filepress(link)
423 | elif "appdrive" in domain or "gdflix" in domain:
424 | return await appflix(link)
425 | else:
426 | return await sharer_scraper(link)
427 |
428 | # Exceptions
429 | elif bool(match(r"https?:\/\/.+\.technicalatg\.\S+", link)):
430 | raise DDLException("Bypass Not Allowed !")
431 | else:
432 | raise DDLException(
433 | f"No Bypass Function Found for your Link : {link}"
434 | )
435 |
436 | if onlylink:
437 | return blink
438 |
439 | links = []
440 | while True:  # follow the bypass chain until it terminates or errors out
441 | try:
442 | links.append(blink)
443 | blink = await direct_link_checker(blink, onlylink=True)
444 | if is_excep_link(links[-1]):
445 | links.append("\n\n" + blink)
446 | break
447 | except Exception:
448 | break
449 | return links
450 |
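451 | # Usage sketch (illustrative only; the droplink URL is a placeholder).
452 | # direct_link_checker dispatches on the URL pattern; onlylink=True returns
453 | # a single bypassed URL, otherwise it follows the chain and returns every
454 | # intermediate hop as a list:
455 | #
456 | # from asyncio import run
457 | # from FZBypass.core.bypass_checker import direct_link_checker
458 | # print(run(direct_link_checker("https://droplink.co/example", onlylink=True)))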
--------------------------------------------------------------------------------