├── img
├── A
├── screenshot1.jpg
└── screenshot2.jpg
├── subscription_data.txt
├── run cmnd.txt
├── Procfile
├── app.py
├── vars.py
├── Dockerfile
├── logs.py
├── requirements.txt
├── README.md
├── utils.py
├── youtube_cookies.txt
├── core.py
└── main.py
/img/A:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/subscription_data.txt:
--------------------------------------------------------------------------------
1 | premium users
2 |
--------------------------------------------------------------------------------
/run cmnd.txt:
--------------------------------------------------------------------------------
1 | gunicorn app:app & python3 main.py
2 |
--------------------------------------------------------------------------------
/Procfile:
--------------------------------------------------------------------------------
1 | worker: python3 main.py
2 | web: python app.py
3 |
--------------------------------------------------------------------------------
/img/screenshot1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Popeye68/TXT-TO-VIDEO/HEAD/img/screenshot1.jpg
--------------------------------------------------------------------------------
/img/screenshot2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/Popeye68/TXT-TO-VIDEO/HEAD/img/screenshot2.jpg
--------------------------------------------------------------------------------
/app.py:
--------------------------------------------------------------------------------
from flask import Flask

app = Flask(__name__)

# Minimal keep-alive endpoint so the hosting platform's health checks pass
# while the Telegram bot runs as a separate process (see Procfile/Dockerfile).
@app.route("/")
def hello():
    return "Hello, World! from spidy"

if __name__ == "__main__":
    # Gunicorn serves `app` in production; this direct run is for local testing.
    app.run(host="0.0.0.0", port=8080)
11 |
--------------------------------------------------------------------------------
/vars.py:
--------------------------------------------------------------------------------
# DON'T add anything here just add in render's secret or env section
from os import environ

# BUG FIX: the previous default of "" made int("") raise ValueError at import
# time whenever API_ID was unset; "0" keeps startup deterministic and lets the
# client fail later with a clear invalid-credentials error instead.
API_ID = int(environ.get("API_ID", "0"))
API_HASH = environ.get("API_HASH", "")
BOT_TOKEN = environ.get("BOT_TOKEN", "")
7 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
FROM python:3.10.8-slim-buster

# System deps: gcc/libffi for native wheels, ffmpeg + aria2 for media downloads.
RUN apt-get update -y && apt-get upgrade -y \
    && apt-get install -y --no-install-recommends gcc libffi-dev musl-dev ffmpeg aria2 python3-pip \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/*

COPY . /app/
WORKDIR /app/
# pytube is already pinned in requirements.txt, so the former separate
# `RUN pip install pytube` layer was redundant and has been dropped.
RUN pip3 install --no-cache-dir --upgrade --requirement requirements.txt
ENV COOKIES_FILE_PATH="youtube_cookies.txt"
# Keep-alive web app and the bot run side by side in one container.
CMD gunicorn app:app & python3 main.py
#spidy
14 |
--------------------------------------------------------------------------------
/logs.py:
--------------------------------------------------------------------------------
# Don't Remove Credit Tg - @spidy_bots

import logging
from logging.handlers import RotatingFileHandler

# Root-logger setup: ERROR-level records go to both a rotating file
# (logs.txt, ~50 MB per file, 10 backups kept) and the console.
logging.basicConfig(
    level=logging.ERROR,
    format=
    "%(asctime)s - %(levelname)s - %(message)s [%(filename)s:%(lineno)d]",
    datefmt="%d-%b-%y %H:%M:%S",
    handlers=[
        RotatingFileHandler("logs.txt", maxBytes=50000000, backupCount=10),
        logging.StreamHandler(),
    ],
)
# Quiet pyrogram's internal chatter down to warnings and above.
logging.getLogger("pyrogram").setLevel(logging.WARNING)


# NOTE(review): this deliberately rebinds the name `logging` from the module
# to the root logger instance so importers can do `from logs import logging`.
# Confusing, but renaming it would break those callers.
logging = logging.getLogger()
20 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | async-lru==2.0.2
2 | certifi==2023.5.7
3 | charset-normalizer==3.1.0
4 | idna==3.4
5 | mutagen==1.46.0
6 | pyaes==1.6.1
7 | pycryptodome
8 | pyrogram
9 | pyromod==1.5
10 | PySocks==1.7.1
11 | python-dotenv==1.0.0
12 | requests==2.31.0
13 | soupsieve==2.4.1
14 | TgCrypto==1.2.5
15 | urllib3==2.0.3
16 | websockets==11.0.3
17 | yt-dlp==2025.01.26
18 | motor
19 | aiohttp
20 | aiofiles
21 | pytz
22 | ffmpeg==1.4
23 | umongo==3.1.0
24 | speedtest-cli
25 | Flask==1.1.2
26 | gunicorn==20.1.0
27 | Jinja2==3.0.3
28 | werkzeug==2.0.2
29 | itsdangerous==2.0.1
30 | youtube-dl
31 | pytube==15.0.0
32 | bs4
33 | beautifulsoup4
34 | cloudscraper
35 | m3u8
36 | python-telegram-bot==20.3
37 | # requests already pinned above (duplicate entry removed)
38 | pywidevine
39 |
40 |
41 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | ---
10 |
11 | ### **Features**
12 | - Convert your txt into video
13 | - Works with PW, Appx and Classplus txt files
14 | - Multiple modes & custom output options
15 | - Supports `/upload`, `/advance`, `/spidy`, and more
16 |
17 | ---
18 |
19 | ### **Bot Commands**
20 | ```
21 | /start - Run bot
22 | /stop - Stop ongoing task
23 | /upload - For using in groups
24 | /advance - Advanced download
25 | /spidy - All types txt
26 | /alpha - Much better
27 | /bravo - Another one
28 | ```
29 |
30 | ---
31 |
32 | ### 🚀 Deploy to Render
33 |
34 | #### **1-Click Deploy:**
35 | [Deploy to Render](https://render.com/deploy?repo=https://github.com/yourusername/TXT-TO-VIDEO)
36 |
37 | #### **Manual Setup on Render:**
38 | ```bash
39 | 1. Fork the repo
40 | 2. Go to https://dashboard.render.com
41 | 3. Click "New Web Service"
42 | 4. Connect GitHub and select your repo
43 | 5. Set build command: pip install -r requirements.txt
44 | 6. Set start command: python3 main.py
45 | 7. Add environment variables: API_ID, API_HASH, BOT_TOKEN, etc.
46 | 8. Click "Deploy"
47 | ```
48 |
49 | ---
50 |
51 | ### 🖥️ Run on VPS
52 | ```bash
53 | # Update system and install dependencies
54 | sudo apt update && sudo apt install git python3-pip -y
55 |
56 | # Clone the repo
57 | git clone https://github.com/popeye68/TXT-TO-VIDEO
58 | cd TXT-TO-VIDEO
59 |
60 | # Install requirements
61 | pip3 install -r requirements.txt
62 |
63 | # Export environment variables or add them in config.py
64 | export API_ID=123456
65 | export API_HASH=your_api_hash
66 | export BOT_TOKEN=your_bot_token
67 |
68 | # Run the bot
69 | python3 main.py
70 | ```
71 | ---
72 |
73 | ### 📸 Screenshots
74 |
75 |
76 |
77 |
78 |
79 | ---
80 |
81 | ### ⚡ Feel Free to Fork and Customize
82 | This project is open source — you're welcome to change or enhance it after forking.
83 |
84 | ---
85 |
86 | ### Connect with us
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
1 | # Don't Remove Credit Telegram - @spidy_universe
2 | import time
3 | import math
4 | import os
5 | from pyrogram.errors import FloodWait
6 |
class Timer:
    """Simple rate limiter: permits an action at most once per interval."""

    def __init__(self, time_between=5):
        # Timestamp of the last permitted action and the minimum gap (seconds).
        self.start_time = time.time()
        self.time_between = time_between

    def can_send(self):
        """Return True (resetting the clock) once the interval has elapsed."""
        if time.time() <= self.start_time + self.time_between:
            return False
        self.start_time = time.time()
        return True
17 |
18 |
19 | from datetime import datetime,timedelta
20 |
def hrb(value, digits=2, delim="", postfix=""):
    """Format a byte count as a human-readable size string.

    Returns None when *value* is None, otherwise e.g. ``hrb(2048) == "2.00KiB"``.
    """
    if value is None:
        return None
    unit = "B"
    for candidate in ("KiB", "MiB", "GiB", "TiB"):
        # Stop scaling once the value drops to a displayable magnitude.
        if value <= 1000:
            break
        value /= 1024
        unit = candidate
    return f"{value:.{digits}f}{delim}{unit}{postfix}"
34 |
def hrt(seconds, precision=0):
    """Render a duration in seconds as a compact string like ``1h2m3s``.

    *precision* caps how many leading components are kept (0 keeps all).
    """
    delta = timedelta(seconds=seconds)
    parts = []

    if delta.days:
        parts.append(f"{delta.days}d")

    hours, remainder = divmod(delta.seconds, 3600)
    if hours:
        parts.append(f"{hours}h")

    minutes, remainder = divmod(remainder, 60)
    if minutes:
        parts.append(f"{minutes}m")

    # Always emit something, even for a zero-length duration.
    if remainder > 0 or not parts:
        parts.append(f"{remainder}s")

    if precision:
        parts = parts[:precision]
    return "".join(parts)
64 |
65 |
66 |
67 | timer = Timer()
68 |
async def progress_bar(current, total, reply, start):
    """Edit *reply* with a throttled upload-progress message.

    Intended as a pyrogram ``progress`` callback; *start* is the upload start
    timestamp.  Edits are rate-limited by the module-level ``timer``.
    """
    if not timer.can_send():
        return
    now = time.time()
    diff = now - start
    if diff < 1:
        # Too early to compute a meaningful speed.
        return
    perc = f"{current * 100 / total:.1f}%"
    elapsed_time = round(diff)
    speed = current / elapsed_time
    remaining_bytes = total - current
    if speed > 0:
        eta = hrt(remaining_bytes / speed, precision=1)
    else:
        eta = "-"
    sp = str(hrb(speed)) + "/s"
    tot = hrb(total)
    cur = hrb(current)
    bar_length = 11
    completed_length = int(current * bar_length / total)
    remaining_length = bar_length - completed_length
    bar = "▰" * completed_length + "▱" * remaining_length

    try:
        await reply.edit(f'\n ╭──⌯════🆄︎ᴘʟᴏᴀᴅɪɴɢ⬆️⬆️═════⌯──╮ \n├⚡ {bar}|﹝{perc}﹞ \n├🚀 Speed » {sp} \n├📟 Processed » {cur}\n├🧲 Size - ETA » {tot} - {eta} \n├🤖 𝔹ʏ » @Engineers_Babu\n╰─═══ ✪ @Engineers_Babu ✪ ═══─╯\n')
    except FloodWait as e:
        import asyncio
        # BUG FIX: pyrogram v2 exposes the wait as e.value (v1 used e.x), and
        # time.sleep() would block the whole event loop during the wait.
        await asyncio.sleep(getattr(e, "value", None) or getattr(e, "x", 1))
97 |
98 |
--------------------------------------------------------------------------------
/youtube_cookies.txt:
--------------------------------------------------------------------------------
1 | # update if needed — WARNING: never commit real account cookies to a public repo; replace these with your own exported cookies locally
2 | # Netscape HTTP Cookie File
3 | # https://curl.haxx.se/rfc/cookie_spec.html
4 | # This is a generated file! Do not edit.
5 |
6 | .youtube.com TRUE / FALSE 1777274127 SID g.a000vAhbPui64qO_YR7LqV_OD9rQh6UUyThvFanW1Zcf_B2tH64WFZzBRcOt93dwYmDllPfAzwACgYKAW0SARQSFQHGX2MiejxjlVwGPe8FmV1iYkIubRoVAUF8yKqwUjhQLMjELd6EheZapOUJ0076
7 | .youtube.com TRUE / TRUE 1771593414 __Secure-1PSIDTS sidts-CjIBEJ3XV0iE8Jj7Vr-Vee325nUrFC62rEqAHPcr3O-qkwuqIR7Xc4_G4L_OKG3l_w4NsBAA
8 | .youtube.com TRUE / TRUE 1771593414 __Secure-3PSIDTS sidts-CjIBEJ3XV0iE8Jj7Vr-Vee325nUrFC62rEqAHPcr3O-qkwuqIR7Xc4_G4L_OKG3l_w4NsBAA
9 | .youtube.com TRUE / TRUE 1777274127 __Secure-1PSID g.a000vAhbPui64qO_YR7LqV_OD9rQh6UUyThvFanW1Zcf_B2tH64Whsrfpby7S0G0GZFjz-ObeQACgYKAdkSARQSFQHGX2MiCNYszIEN4DWLVW1thGI9hhoVAUF8yKole85ZD4729SAdhN55TieB0076
10 | .youtube.com TRUE / TRUE 1777274127 __Secure-3PSID g.a000vAhbPui64qO_YR7LqV_OD9rQh6UUyThvFanW1Zcf_B2tH64WqF6CsPf0ZA52pzTNhefutAACgYKAd4SARQSFQHGX2MipmrirchRaHDeTbDDI4L-lhoVAUF8yKoxk07zhgKCmJJu2Mf3tQzb0076
11 | .youtube.com TRUE / FALSE 1777274127 HSID AnSge7RMzY3sVT1iq
12 | .youtube.com TRUE / TRUE 1777274127 SSID AbIIvQfcjNFQUUFIU
13 | .youtube.com TRUE / FALSE 1777274127 APISID fJ-fKDh7dZsRBtJQ/AUxca7UUaSx7aPRTj
14 | .youtube.com TRUE / TRUE 1777274127 SAPISID EOUYKa4zPNr0K9Dw/AEVRuBpKdzOrBobqd
15 | .youtube.com TRUE / TRUE 1777274127 __Secure-1PAPISID EOUYKa4zPNr0K9Dw/AEVRuBpKdzOrBobqd
16 | .youtube.com TRUE / TRUE 1777274127 __Secure-3PAPISID EOUYKa4zPNr0K9Dw/AEVRuBpKdzOrBobqd
17 | .youtube.com TRUE / FALSE 1774416966 SIDCC AKEyXzUzTk8KvEoIHaKxrvJdsvQUFW4oCsGT4UkqkVul8bb6xUocY3dedaos2w-L2XIDIN-TGg
18 | .youtube.com TRUE / TRUE 1774416966 __Secure-1PSIDCC AKEyXzVx2IWXY3R2PEfrRFPAu1pI6ZQBFh0CJNIWRNdkyVGf5Ymfuiy6cM9xktNGGmefiM04pA
19 | .youtube.com TRUE / TRUE 1774416966 __Secure-3PSIDCC AKEyXzXrR4A9Ga1O_dc4Pz4P_08kPmR4Y74CIjDG24EllTOWP-dE8FbWCjmci7553gZphK6B2Hw
20 | .youtube.com TRUE / TRUE 1774617835 LOGIN_INFO AFmmF2swRQIgZ3gelzAYyDkFbnwB75GBC_KK3--cw3_tG3Yan3lyVtsCIQDUmFJg2lVRVGk2ZepuxJXlSLYD1b1DLk12PT8LuoOKXQ:QUQ3MjNmeVNXTmtNcjZodE1PcTFBMTUtV0N2TXJsVHpuRS02bkFMaklsdFlwaUJITENaQ1ZUNlUzdEdFRjlqQW10c0NUYXRxSW1OaFpsNXgtcV84NWRqR0MwUkFzcE9KcUtxYXZWNFhmMzJ6N0pBVWlPUXMyd3ctNTd6ZW1qZ1B6b1lIYVo2UXRfNm5PUFY3YTJyUE9QbTNCcEw4THI3clNn
21 | .youtube.com TRUE / TRUE 1758432946 __Secure-ROLLOUT_TOKEN CL-gmfP10OPZigEQy6WcyazSiwMY6svGvcGkjAM%3D
22 | .youtube.com TRUE / TRUE 1758432950 VISITOR_INFO1_LIVE sLtxd7RaOq4
23 | .youtube.com TRUE / TRUE 1758432950 VISITOR_PRIVACY_METADATA CgJJThIEGgAgJQ%3D%3D
24 | .youtube.com TRUE / TRUE 1777440951 PREF f6=40000000&tz=Asia.Kolkata&f7=100
25 | .youtube.com TRUE / FALSE 1740121460 ST-10d87nd csn=RWQXIQky0d9RTq8C&itct=CG0Q_FoiEwj-h5XHmdSLAxXXjWYCHfRUEBsyCmctaGlnaC1yZWNaD0ZFd2hhdF90b193YXRjaJoBBhCOHhieAQ%3D%3D
26 | .youtube.com TRUE / FALSE 1740145530 ST-x3zc7r csn=VNEM7hwVfOiXXS9K&itct=CGoQ_FoiEwiM5s6c89SLAxV2tFYBHY28ErMyCmctaGlnaC1yZWNaD0ZFd2hhdF90b193YXRjaJoBBhCOHhieAQ%3D%3D
27 | .youtube.com TRUE / FALSE 1740398801 ST-h3az29 csn=1dm2HeRfs0reoVLa&itct=CG0Q_FoiEwjT8tfdotyLAxWit1YBHVL-NqIyCmctaGlnaC1yZWNaD0ZFd2hhdF90b193YXRjaJoBBhCOHhieAQ%3D%3D
28 | .youtube.com TRUE / FALSE 1740407388 ST-1yod0rt csn=xbaMD_ZSl3bDpf1D&itct=CBAQ1TYiEwixs5u8wtyLAxVchNgFHfWHEqs%3D
29 | .youtube.com TRUE / FALSE 1740575089 ST-13avi31 csn=TMcaElOy7oFUEZ6t&itct=CG0Q_FoiEwjjpby6s-GLAxXozjQHHVTGMoYyCmctaGlnaC1yZWNaD0ZFd2hhdF90b193YXRjaJoBBhCOHhieAQ%3D%3D
30 | .youtube.com TRUE / FALSE 1740575166 ST-up1ya7 csn=ci2T3xaoyMSW60FS&itct=CGQQh_YEGAEiEwjK1aC8s-GLAxXSdF4EHUewKZ9aD0ZFd2hhdF90b193YXRjaJoBBQgkEI4e
31 | .youtube.com TRUE / FALSE 1740825158 ST-15v4jje csn=ShR-kMwzS2nm7NWc&itct=CG0Q_FoiEwiBsrqE1-iLAxVU4jQHHUARAuMyCmctaGlnaC1yZWNaD0ZFd2hhdF90b193YXRjaJoBBhCOHhieAQ%3D%3D
32 | .youtube.com TRUE / FALSE 1740839233 ST-1668un8 csn=6FQ8DPHFfRuLy6aq&itct=CFYQ_9AFIhMI0OarvIvpiwMVKutMAh1paQ7RMhhob21lcGFnZV9tb2JpbGVfbWFzdGhlYWRI_sec0qyblacZ
33 | .youtube.com TRUE / FALSE 1741175595 ST-zg8eie csn=OdLo7ZrCDAXILz7N&itct=CBQQkeQHGAUiEwjJg9LA8PKLAxVkmWMGHdbDKcE%3D
34 | .youtube.com TRUE / FALSE 1741411252 ST-s1ooxb csn=V3e-y-Hmmo0Q-IxF&itct=CCsQpDAiEwje4_-z3vmLAxV1dJ0JHTdTAstaGFVDQndHdjQwbWhDNmRIN1gwZmFVQ2ZzUZoBAxDyOA%3D%3D
35 | .youtube.com TRUE / TRUE 0 YSC Al5ioh1xz4c
36 | .youtube.com TRUE / FALSE 1742880962 ST-1qhkeb4 csn=_P4y2MejdGQTDVUb&itct=CIcBEIf2BBgAIhMI_bLkv8GkjAMV8p-vAR3Xhy0xWg9GRXdoYXRfdG9fd2F0Y2iaAQUIJBCOHg%3D%3D
37 | .youtube.com TRUE / TRUE 1742881556 CONSISTENCY AKreu9toy_XonvNsxjYREgTfqpasxEb1sbqB7sBCz0jS63z4w9A67ZsI-vWIGxa8iHRNw0BElW36o55bqCcR_JY6EmiumZlb7OXfxrBWkGlRHOWd0QmNyRgq-pA
38 |
--------------------------------------------------------------------------------
/core.py:
--------------------------------------------------------------------------------
1 | # Don't Remove Credit Tg - @spidy_universe
2 |
3 | import os
4 | import time
5 | import datetime
6 | import aiohttp
7 | import aiofiles
8 | import asyncio
9 | import logging
10 | import requests
11 | import tgcrypto
12 | import subprocess
13 | import concurrent.futures
14 |
15 | from utils import progress_bar
16 |
17 | from pyrogram import Client, filters
18 | from pyrogram.types import Message
19 |
20 | from pytube import Playlist #Youtube Playlist Extractor
21 | from yt_dlp import YoutubeDL
22 | import yt_dlp as youtube_dl
23 |
def duration(filename):
    """Return the media duration of *filename* in seconds via ffprobe."""
    probe = subprocess.run(
        [
            "ffprobe",
            "-v", "error",
            "-show_entries", "format=duration",
            "-of", "default=noprint_wrappers=1:nokey=1",
            filename,
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    )
    return float(probe.stdout)
31 |
def exec(cmd):
    """Run *cmd* (an argument list); print and return its decoded stdout."""
    completed = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    decoded = completed.stdout.decode()
    print(decoded)
    return decoded
def pull_run(work, cmds):
    """Run every command in *cmds* through exec() on a pool of *work* threads."""
    with concurrent.futures.ThreadPoolExecutor(max_workers=work) as pool:
        print("Waiting for tasks to complete")
        # map() submits everything eagerly; the with-block waits for completion.
        pool.map(exec, cmds)
async def aio(url, name):
    """Download *url* and save it as ``<name>.pdf``; return that filename."""
    out_path = f'{name}.pdf'
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            # Only write the body on a successful response.
            if resp.status == 200:
                handle = await aiofiles.open(out_path, mode='wb')
                await handle.write(await resp.read())
                await handle.close()
    return out_path
51 |
52 |
async def download(url, name):
    """Fetch *url* into ``<name>.pdf`` (same contract as aio); return the path."""
    target = f'{name}.pdf'
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            # Write the payload only when the server answered 200 OK.
            if resp.status == 200:
                handle = await aiofiles.open(target, mode='wb')
                await handle.write(await resp.read())
                await handle.close()
    return target
62 |
63 |
64 |
def parse_vid_info(info):
    """Parse ``yt-dlp -F`` style output into ``[(format_id, resolution), ...]``.

    Skips bracketed/info and separator lines, header rows containing
    "RESOLUTION", and audio-only rows; keeps the first row per resolution.
    """
    new_info = []
    seen = []
    for line in info.strip().split("\n"):
        line = str(line)
        if "[" in line or '---' in line:
            continue
        # Collapse runs of spaces so the column split below is predictable.
        # BUG FIX: the original replace(" ", " ") was a no-op (or an infinite
        # loop as transcribed); double spaces are what must be collapsed.
        while "  " in line:
            line = line.replace("  ", " ")
        # BUG FIX: line.strip() was called without assigning the result, so
        # leading whitespace shifted the column indices.
        line = line.strip()
        cols = line.split("|")[0].split(" ", 2)
        try:
            if "RESOLUTION" not in cols[2] and cols[2] not in seen and "audio" not in cols[2]:
                seen.append(cols[2])
                new_info.append((cols[0], cols[2]))
        except IndexError:
            # Short rows (fewer than three columns) carry no format info.
            pass
    return new_info
84 |
85 |
def vid_info(info):
    """Parse ``yt-dlp -F`` style output into ``{resolution: format_id}``.

    Same filtering rules as parse_vid_info, but returns a mapping keyed by
    resolution (first format seen per resolution wins).
    """
    new_info = dict()
    seen = []
    for line in info.strip().split("\n"):
        line = str(line)
        if "[" in line or '---' in line:
            continue
        # BUG FIX: collapse *double* spaces (the original replace(" ", " ")
        # was a no-op / infinite loop as transcribed).
        while "  " in line:
            line = line.replace("  ", " ")
        # BUG FIX: strip() result was previously discarded.
        line = line.strip()
        cols = line.split("|")[0].split(" ", 3)
        try:
            if "RESOLUTION" not in cols[2] and cols[2] not in seen and "audio" not in cols[2]:
                seen.append(cols[2])
                new_info.update({f'{cols[2]}': f'{cols[0]}'})
        except IndexError:
            pass
    return new_info
111 |
112 |
113 |
async def run(cmd):
    """Execute *cmd* through the shell asynchronously.

    Returns False when the exit code is exactly 1; otherwise a tagged string
    with whichever of stdout/stderr is non-empty (None if both are empty).
    """
    proc = await asyncio.create_subprocess_shell(
        cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    out, err = await proc.communicate()
    print(f'[{cmd!r} exited with {proc.returncode}]')

    if proc.returncode == 1:
        return False
    for label, data in (("stdout", out), ("stderr", err)):
        if data:
            return f'[{label}]\n{data.decode()}'
129 |
130 |
131 |
def old_download(url, file_name, chunk_size=1024 * 10):
    """Stream *url* into *file_name*, replacing any existing file."""
    if os.path.exists(file_name):
        os.remove(file_name)
    response = requests.get(url, allow_redirects=True, stream=True)
    with open(file_name, 'wb') as out:
        for piece in response.iter_content(chunk_size=chunk_size):
            # Skip keep-alive chunks, which arrive as empty bytes.
            if piece:
                out.write(piece)
    return file_name
141 |
142 |
def human_readable_size(size, decimal_places=2):
    """Format *size* bytes using 1024-based units, e.g. ``1.00 KB``."""
    unit = 'B'
    for unit in ('B', 'KB', 'MB', 'GB', 'TB', 'PB'):
        # Stop once the value is displayable, or we hit the largest unit.
        if size < 1024.0 or unit == 'PB':
            break
        size /= 1024.0
    return f"{size:.{decimal_places}f} {unit}"
149 |
150 |
def time_name():
    """Build a timestamped output filename like ``2024-01-01 123456.mp4``."""
    stamp = datetime.datetime.now().strftime("%H%M%S")
    return f"{datetime.date.today()} {stamp}.mp4"
156 |
def get_playlist_videos(playlist_url):
    """Return ``(playlist_title, {video_title: watch_url})`` for a playlist.

    Individual video failures are logged and skipped; any other failure
    returns ``(None, None)``.
    """
    try:
        playlist = Playlist(playlist_url)
        title = playlist.title

        videos = {}
        for video in playlist.videos:
            try:
                videos[video.title] = video.watch_url
            except Exception as e:
                logging.error(f"Could not retrieve video details: {e}")

        return title, videos
    except Exception as e:
        logging.error(f"An error occurred: {e}")
        return None, None
181 |
def get_all_videos(channel_url):
    """Collect all video entries from a channel/playlist URL via yt-dlp.

    Returns ``({index: (title, url)}, channel_name)`` on success, otherwise
    ``(None, None)`` when the result carries no 'entries'.
    """
    # Flat extraction: list entries without downloading or resolving each one.
    ydl_opts = {
        'quiet': True,
        'extract_flat': True,
        'skip_download': True
    }

    all_videos = []
    with YoutubeDL(ydl_opts) as ydl:
        result = ydl.extract_info(channel_url, download=False)

        if 'entries' in result:
            channel_name = result['title']
            all_videos.extend(result['entries'])

            # NOTE(review): yt-dlp info dicts don't normally carry a '_next'
            # key, so this pagination loop looks like it never fires — confirm.
            while 'entries' in result and '_next' in result:
                next_page_url = result['_next']
                result = ydl.extract_info(next_page_url, download=False)
                all_videos.extend(result['entries'])

            # 1-based numbering, matching the saved txt listing format.
            video_links = {index+1: (video['title'], video['url']) for index, video in enumerate(all_videos)}
            return video_links, channel_name
        else:
            return None, None
206 |
def save_to_file(video_links, channel_name):
    """Write numbered ``"N. title: url"`` lines to ``<channel_name>.txt``.

    Relative/short URLs are expanded to absolute youtube.com links.
    Returns the filename written.
    """
    import re  # BUG FIX: `re` is never imported at module level in this file.

    # Sanitize the channel name into a safe filename.
    sanitized_channel_name = re.sub(r'[^\w\s-]', '', channel_name).strip().replace(' ', '_')
    filename = f"{sanitized_channel_name}.txt"
    with open(filename, 'w', encoding='utf-8') as file:
        for number, (title, url) in video_links.items():
            if url.startswith("https://"):
                formatted_url = url
            elif "shorts" in url:
                # Relative shorts paths already include "/shorts/...".
                formatted_url = f"https://www.youtube.com{url}"
            else:
                # Bare video ids become watch URLs.
                formatted_url = f"https://www.youtube.com/watch?v={url}"
            file.write(f"{number}. {title}: {formatted_url}\n")
    return filename
222 |
async def download_video(url, cmd, name):
    """Dispatch a download by URL type (visionias / penpencilvod / default).

    NOTE(review): dead code — this name is rebound by later
    ``download_video`` definitions in this module.  It also calls
    ``default_download`` and reads ``failed_counter``, neither of which is
    defined in the visible module — confirm before resurrecting.
    """
    download_cmd = f'{cmd} -R 25 --fragment-retries 25 --external-downloader aria2c --downloader-args "aria2c: -x 16 -j 32"'
    global failed_counter
    print(download_cmd)
    logging.info(download_cmd)
    k = subprocess.run(download_cmd, shell=True)

    # Check if the URL is of type 'visionias' or 'penpencilvod'
    if "visionias" in cmd:
        return await download_visionias(url, cmd, name)
    elif "penpencilvod" in cmd:
        return await download_penpencilvod(url, cmd, name)
    else:
        # Default handling for other types of URLs
        return await default_download(url, cmd, name)
238 |
async def download_visionias(url, cmd, name):
    """Retry wrapper for visionias URLs.

    NOTE(review): the control flow re-enters ``download_video`` while
    ``failed_counter <= 10`` without checking whether the download actually
    failed, and ``failed_counter`` / ``default_download`` are not defined in
    the visible module.  The final ``download_video`` definition below
    supersedes this whole path anyway — confirm intent before relying on it.
    """
    global failed_counter
    # Retry logic for 'visionias' URLs
    if failed_counter <= 10:
        failed_counter += 1
        await asyncio.sleep(5)
        return await download_video(url, cmd, name)
    else:
        # Reset failed_counter if the download succeeds
        failed_counter = 0
        return await default_download(url, cmd, name)
250 |
251 |
async def download_penpencilvod(url, cmd, name):
    """Retry wrapper for penpencilvod URLs.

    NOTE(review): identical caveats to download_visionias — retries without a
    failure check, relies on the undefined ``failed_counter`` /
    ``default_download``, and is superseded by the final ``download_video``.
    """
    global failed_counter
    # Retry logic for 'penpencilvod' URLs
    if failed_counter <= 10:
        failed_counter += 1
        await asyncio.sleep(5)
        return await download_video(url, cmd, name)
    else:
        # Reset failed_counter if the download succeeds
        failed_counter = 0
        return await default_download(url, cmd, name)
263 |
async def download_video(url,cmd, name):
    """Run *cmd* via the shell and return the downloaded file's path.

    NOTE(review): superseded by the later ``download_video`` definition in
    this module (which adds infinite retries and a user agent); kept
    byte-identical here.  The ``except FileNotFoundError`` branch is broken —
    ``os.path.isfile.splitext`` does not exist — but nothing in the ``try``
    raises FileNotFoundError, so it is unreachable in practice.
    """
    download_cmd = f'{cmd} -R 25 --fragment-retries 25 --external-downloader aria2c --downloader-args "aria2c: -x 16 -j 32"'
    global failed_counter
    print(download_cmd)
    logging.info(download_cmd)
    k = subprocess.run(download_cmd, shell=True)
    if "visionias" in cmd and k.returncode != 0 and failed_counter <= 10:
        failed_counter += 1
        await asyncio.sleep(5)
        await download_video(url, cmd, name)
    failed_counter = 0
    try:
        if os.path.isfile(name):
            return name
        elif os.path.isfile(f"{name}.webm"):
            return f"{name}.webm"
        name = name.split(".")[0]
        if os.path.isfile(f"{name}.mkv"):
            return f"{name}.mkv"
        elif os.path.isfile(f"{name}.mp4"):
            return f"{name}.mp4"
        elif os.path.isfile(f"{name}.mp4.webm"):
            return f"{name}.mp4.webm"

        return name
    except FileNotFoundError as exc:
        return os.path.isfile.splitext[0] + "." + "mp4"
291 |
292 |
async def send_doc(bot: Client, m: Message,cc,ka,cc1,prog,count,name):
    """Upload file *ka* as a document with caption *cc1*, then delete it.

    NOTE(review): duplicated verbatim later in this module — the later
    definition is the one in effect.  The time.sleep calls block the event
    loop inside an async handler (asyncio.sleep would be correct).
    """
    reply = await m.reply_text(f"Uploading » `{name}`")
    time.sleep(1)
    start_time = time.time()
    await m.reply_document(ka,caption=cc1)
    count+=1
    await reply.delete (True)
    time.sleep(1)
    os.remove(ka)
    time.sleep(3)
303 |
304 |
async def send_vid(bot: Client, m: Message,cc,filename,thumb,name,prog):
    """Generate a thumbnail and upload *filename* as a video (doc fallback).

    NOTE(review): duplicated later in this module — the later definition is
    the one in effect.  The literal "(unknown)" in the ffmpeg command and
    thumbnail paths looks like a mangled f-string placeholder (probably meant
    to be the video filename); as written it probes a file literally named
    "(unknown)" — confirm against upstream.
    """

    subprocess.run(f'ffmpeg -i "(unknown)" -ss 00:00:12 -vframes 1 "(unknown).jpg"', shell=True)
    await prog.delete (True)
    reply = await m.reply_text(f"**Uploading ...** - `{name}`")
    try:
        if thumb == "no":
            thumbnail = f"(unknown).jpg"
        else:
            thumbnail = thumb
    except Exception as e:
        await m.reply_text(str(e))

    dur = int(duration(filename))

    start_time = time.time()

    try:
        await m.reply_video(filename,caption=cc, supports_streaming=True,height=720,width=1280,thumb=thumbnail,duration=dur, progress=progress_bar,progress_args=(reply,start_time))
    except Exception:
        await m.reply_document(filename,caption=cc, progress=progress_bar,progress_args=(reply,start_time))


    os.remove(filename)

    os.remove(f"(unknown).jpg")
    await reply.delete (True)
332 |
333 | # helper.py
async def download_and_send_video(url, name, chat_id, bot, log_channel_id, accept_logs, caption, m):
    """
    Downloads a video from a URL and sends it to the specified chat.
    Handles encrypted video URLs differently if needed.
    """
    try:
        if "encrypted" in url:
            # Placeholder branch for encrypted sources.
            print("Handling encrypted video...")

        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                if response.status != 200:
                    await m.reply_text(f"Failed to download video. Status code: {response.status}")
                    return

                video_path = f"{name}.mp4"
                payload = await response.read()

                # Persist the downloaded bytes before uploading.
                with open(video_path, 'wb') as out:
                    out.write(payload)

                # Deliver to the requesting chat.
                message = await bot.send_video(chat_id=chat_id, video=video_path, caption=caption)

                # Mirror to the log channel by file_id when logging is enabled.
                if accept_logs == 1:
                    await bot.send_video(chat_id=log_channel_id, video=message.video.file_id, caption=caption)

                # Cleanup: remove the local copy after sending.
                os.remove(video_path)
    except Exception as e:
        await m.reply_text(f"An error occurred: {str(e)}")
370 |
371 |
372 |
async def download_video(url, cmd, name):
    """Run the yt-dlp/aria2c command *cmd* and return the downloaded file path.

    Retries visionias downloads up to 10 times, then looks for the output
    under the extensions yt-dlp commonly produces.
    """
    download_cmd = f'{cmd} -R infinite --fragment-retries 25 --socket-timeout 50 --external-downloader aria2c --downloader-args "aria2c: -x 16 -j 32" --user-agent "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36"'
    global failed_counter
    print(download_cmd)
    logging.info(download_cmd)
    k = subprocess.run(download_cmd, shell=True)
    # Retry loop for flaky visionias links; the recursive call re-runs the
    # command, after which the file checks below pick up whatever landed.
    if "visionias" in cmd and k.returncode != 0 and failed_counter <= 10:
        failed_counter += 1
        await asyncio.sleep(5)
        await download_video(url, cmd, name)
    failed_counter = 0

    # yt-dlp may have written the file under several possible extensions.
    if os.path.isfile(name):
        return name
    if os.path.isfile(f"{name}.webm"):
        return f"{name}.webm"
    base = name.split(".")[0]
    for candidate in (f"{base}.mkv", f"{base}.mp4", f"{base}.mp4.webm"):
        if os.path.isfile(candidate):
            return candidate
    # BUG FIX: the old except-branch called the non-existent
    # os.path.isfile.splitext (AttributeError); nothing here can raise
    # FileNotFoundError, so the dead try/except is gone.
    return base
400 |
401 |
async def send_doc(bot: Client, m: Message, cc, ka, cc1, prog, count, name):
    """Upload file *ka* as a document captioned *cc1*, then remove it locally.

    Note: *count* is incremented locally only (ints are immutable), so the
    caller does not observe the change.
    """
    reply = await m.reply_text(f"Uploading » `{name}`")
    # BUG FIX: time.sleep() blocked the whole event loop inside an async
    # handler; asyncio.sleep keeps other tasks running during the pauses.
    await asyncio.sleep(1)
    start_time = time.time()
    await m.reply_document(ka, caption=cc1)
    count += 1
    await reply.delete(True)
    await asyncio.sleep(1)
    os.remove(ka)
    await asyncio.sleep(3)
412 |
413 |
async def send_vid(bot: Client, m: Message, cc, filename, thumb, name, prog):
    """Upload *filename* as a streaming video, falling back to a document.

    Grabs a thumbnail frame at 12s via ffmpeg unless the caller supplied one
    (*thumb* != "no"), then cleans up the video and generated thumbnail.
    """
    # BUG FIX: the ffmpeg command previously used the literal string
    # "(unknown)" (a mangled placeholder) instead of the actual video path
    # for both the input and the thumbnail output.
    thumb_path = f"{filename}.jpg"
    subprocess.run(f'ffmpeg -i "{filename}" -ss 00:00:12 -vframes 1 "{thumb_path}"', shell=True)
    await prog.delete(True)
    reply = await m.reply_text(f"**⥣ Uploading...** » `{name}`")
    try:
        if thumb == "no":
            thumbnail = thumb_path
        else:
            thumbnail = thumb
    except Exception as e:
        await m.reply_text(str(e))

    dur = int(duration(filename))

    start_time = time.time()

    try:
        await m.reply_video(filename, caption=cc, supports_streaming=True, height=720, width=1280, thumb=thumbnail, duration=dur, progress=progress_bar, progress_args=(reply, start_time))
    except Exception:
        # Some files can't be sent as streaming video; fall back to a document.
        await m.reply_document(filename, caption=cc, progress=progress_bar, progress_args=(reply, start_time))

    os.remove(filename)
    os.remove(thumb_path)
    await reply.delete(True)
441 |
442 |
--------------------------------------------------------------------------------
/main.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import sys
4 | import json
5 | import time
6 | import asyncio
7 | import requests
8 | import subprocess
9 | import urllib.parse
10 | import yt_dlp
11 | import cloudscraper
12 | import m3u8
13 | import core as helper
14 | from utils import progress_bar
15 | from vars import API_ID, API_HASH, BOT_TOKEN
16 | from aiohttp import ClientSession
17 | from pyromod import listen
18 | from subprocess import getstatusoutput
19 | from pytube import YouTube
20 | from aiohttp import web
21 | import yt_dlp
22 | from pyrogram import Client, filters
23 | from pyrogram.types import Message
24 | from pyrogram.errors import FloodWait
25 | from pyrogram.errors.exceptions.bad_request_400 import StickerEmojiInvalid
26 | from pyrogram.types.messages_and_media import message
27 | from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup
28 |
# Initialize the bot
# Pyrogram client; credentials are read from the environment via vars.py.
bot = Client(
    "bot",
    api_id=API_ID,
    api_hash=API_HASH,
    bot_token=BOT_TOKEN
)
36 |
# Banner/thumbnail images used in replies (all currently the same image).
photo = "https://i.postimg.cc/dVY9nL63/IMG-20250426-130510-655.jpg"
cpphoto = "https://i.postimg.cc/dVY9nL63/IMG-20250426-130510-655.jpg"
appxzip = "https://i.postimg.cc/dVY9nL63/IMG-20250426-130510-655.jpg"
my_name = "🅂🄿🄸🄳🅈"
CHANNEL_ID = "-1002607772171"##change it with your channel 🆔

# Netscape-format cookie file handed to yt-dlp for YouTube downloads.
cookies_file_path = os.getenv("COOKIES_FILE_PATH", "youtube_cookies.txt")
44 |
# Define aiohttp routes
routes = web.RouteTableDef()

@routes.get("/", allow_head=True)
async def root_route_handler(request):
    """Health-check endpoint: answers GET/HEAD / with a static JSON string."""
    return web.json_response("your_render_url") ## change it with your host url
51 |
async def web_server():
    """Build the aiohttp application that serves the health-check route."""
    application = web.Application(client_max_size=30000000)
    application.add_routes(routes)
    return application
56 |
async def start_bot():
    """Start the pyrogram client.  NOTE(review): redefined identically below."""
    await bot.start()
    print("Bot is up and running")
60 |
async def stop_bot():
    """Stop the pyrogram client.  NOTE(review): redefined identically below."""
    await bot.stop()
63 |
async def main():
    """Boot the keep-alive web server (if WEBHOOK) and the bot.

    NOTE(review): dead code — this definition is overwritten by the second
    ``main`` below.  This version calls ``bot.polling()``, which does not
    exist on a pyrogram Client, and WEBHOOK/PORT are not defined anywhere in
    the visible module — confirm before reusing.
    """
    if WEBHOOK:
        # Start the web server
        app_runner = web.AppRunner(await web_server())
        await app_runner.setup()
        site = web.TCPSite(app_runner, "0.0.0.0", PORT)
        await site.start()
        print(f"Web server started on port {PORT}")

    # Start the bot
    await start_bot()

    # Keep the program running
    try:
        while True:
            await bot.polling() # Run forever, or until interrupted
    except (KeyboardInterrupt, SystemExit):
        await stop_bot()
82 |
83 |
async def start_bot():
    """Start the pyrogram client and announce readiness on stdout."""
    await bot.start()
    print("Bot is up and running")
87 |
async def stop_bot():
    """Gracefully disconnect the pyrogram client."""
    await bot.stop()
90 |
async def main():
    """Run the optional keep-alive web server plus the bot until interrupted.

    NOTE(review): WEBHOOK and PORT are not defined anywhere in the visible
    module — unless they are set in the truncated remainder of this file,
    this raises NameError at startup.  Confirm (the Procfile/Dockerfile run
    gunicorn separately, so WEBHOOK may be intended to be False).
    """
    if WEBHOOK:
        # Start the web server
        app_runner = web.AppRunner(await web_server())
        await app_runner.setup()
        site = web.TCPSite(app_runner, "0.0.0.0", PORT)
        await site.start()
        print(f"Web server started on port {PORT}")

    # Start the bot
    await start_bot()

    # Keep the program running
    try:
        while True:
            await asyncio.sleep(3600) # Run forever, or until interrupted
    except (KeyboardInterrupt, SystemExit):
        await stop_bot()
109 |
class Data:
    # Message template used by the /start handler; {0} is the user's mention.
    START = (
        "🌟 Welcome {0}! 🌟\n\n"
    )
# Define the start command handler
@bot.on_message(filters.command("start"))
async def start(client: Client, msg: Message):
    """Reply to /start with an animated fake "boot" progress sequence.

    Fixes: the original fetched ``client.get_me()`` into unused locals
    (an extra API round-trip per /start) — removed. The five copy-pasted
    edit/sleep stanzas are collapsed into a loop over the stage texts;
    every message shown to the user is byte-identical to before.
    """
    greeting = Data.START.format(msg.from_user.mention)
    start_message = await client.send_message(msg.chat.id, greeting)

    # Each stage is appended to the greeting and displayed for ~1 second.
    stages = (
        "Initializing Uploader bot... 🤖\n\n"
        "Progress: [⬜⬜⬜⬜⬜⬜⬜⬜⬜] 0%\n\n",
        "Loading features... ⏳\n\n"
        "Progress: [🟥🟥🟥⬜⬜⬜⬜⬜⬜] 25%\n\n",
        "This may take a moment, sit back and relax! 😊\n\n"
        "Progress: [🟧🟧🟧🟧🟧⬜⬜⬜⬜] 50%\n\n",
        "Checking Bot Status... 🔍\n\n"
        "Progress: [🟨🟨🟨🟨🟨🟨🟨⬜⬜] 75%\n\n",
        "Checking status Ok... Command Nhi Bataunga **Bot Made BY 🅂🄿🄸🄳🅈™👨🏻💻**🔍\n\n"
        "Progress:[🟩🟩🟩🟩🟩🟩🟩🟩🟩] 100%\n\n",
    )
    for stage in stages:
        await asyncio.sleep(1)
        await start_message.edit_text(greeting + stage)
158 |
@bot.on_message(filters.command(["stop"]) )
async def restart_handler(_, m):
    # /stop: delete the command message, acknowledge, then re-exec this process.
    # NOTE(review): despite the "STOPPED" text, os.execl replaces the current
    # process with a fresh `python main.py` — this RESTARTS the bot rather
    # than stopping it (hence the function name).
    await m.delete()
    await m.reply_text("**STOPPED**🛑", True)
    os.execl(sys.executable, sys.executable, *sys.argv)
164 |
165 |
@bot.on_message(filters.command(["king","upload"]) )
async def txt_handler(bot: Client, m: Message):
    """/king or /upload: interactively ingest a TXT file of "name://url"
    lines, normalise each URL through a chain of provider-specific rewrites,
    then download and re-upload every item (video/PDF/image/audio/html) to
    the chat with branded captions.

    Flow: prompt for TXT file -> parse links -> prompt for start index,
    batch name, resolution, credit name, PW token, thumbnail -> loop over
    links, classifying each URL and dispatching to the matching uploader.
    """
    await m.delete()

    editable = await m.reply_text(f"**🔹Hi I am Poweful TXT Downloader📥 Bot.**\n🔹**Send me the TXT file and wait.**")
    # NOTE(review): `input` shadows the builtin of the same name inside this handler.
    input: Message = await bot.listen(editable.chat.id)
    x = await input.download()
    await input.delete(True)
    file_name, ext = os.path.splitext(os.path.basename(x))
    credit = f"𝗦𝗣𝗜𝗗𝗬™"
    # Default PW token used when the user replies 'not' at the token prompt.
    token = f"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3MzYxNTE3MzAuMTI2LCJkYXRhIjp7Il9pZCI6IjYzMDRjMmY3Yzc5NjBlMDAxODAwNDQ4NyIsInVzZXJuYW1lIjoiNzc2MTAxNzc3MCIsImZpcnN0TmFtZSI6IkplZXYgbmFyYXlhbiIsImxhc3ROYW1lIjoic2FoIiwib3JnYW5pemF0aW9uIjp7Il9pZCI6IjVlYjM5M2VlOTVmYWI3NDY4YTc5ZDE4OSIsIndlYnNpdGUiOiJwaHlzaWNzd2FsbGFoLmNvbSIsIm5hbWUiOiJQaHlzaWNzd2FsbGFoIn0sImVtYWlsIjoiV1dXLkpFRVZOQVJBWUFOU0FIQEdNQUlMLkNPTSIsInJvbGVzIjpbIjViMjdiZDk2NTg0MmY5NTBhNzc4YzZlZiJdLCJjb3VudHJ5R3JvdXAiOiJJTiIsInR5cGUiOiJVU0VSIn0sImlhdCI6MTczNTU0NjkzMH0.iImf90mFu_cI-xINBv4t0jVz-rWK1zeXOIwIFvkrS0M"
    # Parse the TXT file: each line becomes [name, rest-of-url] split on "://".
    try:
        with open(x, "r") as f:
            content = f.read()
        content = content.split("\n")
        links = []
        for i in content:
            links.append(i.split("://", 1))
        os.remove(x)
    except:
        await m.reply_text("Invalid file input.")
        os.remove(x)
        return

    # Edit the message to show the total number of links found
    await editable.edit(f"Total links found are **{len(links)}**\n\nSend from where you want to download (initial is **1**).")

    # Wait for user input
    input0: Message = await bot.listen(editable.chat.id)
    raw_text = input0.text

    # Delete the user's input message
    await input0.delete(True)

    # Try to convert the input to an integer, default to 1 if conversion fails
    try:
        arg = int(raw_text)
    except ValueError:
        arg = 1

    # If the input is "1", proceed with batch naming and notifications
    if raw_text == "1":
        # Extract the file name without extension
        file_name_without_ext = os.path.splitext(file_name)[0]

        # Create a fancy batch name
        fancy_batch_name = f"𝐁𝐚𝐭𝐜𝐡 𝐍𝐚𝐦𝐞: 𝗤𝘂𝗮𝗹𝗶𝘁𝘆".replace("𝗤𝘂𝗮𝗹𝗶𝘁𝘆", file_name_without_ext)

        # Send a message with the batch name and pin it
        name_message = await bot.send_message(
            m.chat.id,
            f"📌 **Batch Name Pinned!** 📌\n"
            f"🎨 {fancy_batch_name}\n"
            f"✨ Stay organized with your pinned batches 🚀!"
        )
        await bot.pin_chat_message(m.chat.id, name_message.id)

        # Wait for 2 seconds before proceeding
        await asyncio.sleep(2)

    await editable.edit("**Enter Your Batch Name or send d for grabing from text filename.**")
    input1: Message = await bot.listen(editable.chat.id)
    raw_text0 = input1.text
    await input1.delete(True)
    if raw_text0 == 'd':
        b_name = file_name
    else:
        b_name = raw_text0

    # Map the requested height to a WxH string used in captions / utkarsh URLs.
    await editable.edit("**Enter resolution.\n Eg : 480 or 720**")
    input2: Message = await bot.listen(editable.chat.id)
    raw_text2 = input2.text
    await input2.delete(True)
    try:
        if raw_text2 == "144":
            res = "144x256"
        elif raw_text2 == "240":
            res = "240x426"
        elif raw_text2 == "360":
            res = "360x640"
        elif raw_text2 == "480":
            res = "480x854"
        elif raw_text2 == "720":
            res = "720x1280"
        elif raw_text2 == "1080":
            res = "1080x1920"
        else:
            res = "UN"
    except Exception:
        res = "UN"

    await editable.edit("**Enter Your Name or send 'de' for use default.\n Eg : 𝗦𝗣𝗜𝗗𝗬™👨🏻💻**")
    input3: Message = await bot.listen(editable.chat.id)
    raw_text3 = input3.text
    await input3.delete(True)
    if raw_text3 == 'de':
        CR = credit
    else:
        CR = raw_text3

    # NOTE(review): prompt shows 'Not' but the comparison below is lowercase 'not'.
    await editable.edit("**Enter Your PW Token For 𝐌𝐏𝐃 𝐔𝐑𝐋 or send 'Not' for use default**")
    input4: Message = await bot.listen(editable.chat.id)
    raw_text4 = input4.text
    await input4.delete(True)
    if raw_text4 == 'not':
        MR = token
    else:
        MR = raw_text4

    await editable.edit("Now send the **Thumb url**\n**Eg :** ``\n\nor Send `no`")
    input6 = message = await bot.listen(editable.chat.id)
    raw_text6 = input6.text
    await input6.delete(True)
    await editable.delete()

    thumb = input6.text
    if thumb.startswith("http://") or thumb.startswith("https://"):
        getstatusoutput(f"wget '{thumb}' -O 'thumb.jpg'")
        thumb = "thumb.jpg"
    else:
        # NOTE(review): `thumb == "no"` is a comparison, not an assignment —
        # a no-op; `thumb` keeps whatever non-URL text the user sent.
        thumb == "no"

    # NOTE(review): raises ValueError here if raw_text was non-numeric,
    # even though `arg` above already defaulted to 1 in that case.
    count =int(raw_text)
    try:
        for i in range(arg-1, len(links)):

            # Normalise common share-link shapes (Drive, nocookie embeds) into
            # direct URLs, then run provider-specific rewrites below.
            Vxy = links[i][1].replace("file/d/","uc?export=download&id=").replace("www.youtube-nocookie.com/embed", "youtu.be").replace("?modestbranding=1", "").replace("/view?usp=sharing","")
            url = "https://" + Vxy
            # Resolve visionias iframe pages to their real m3u8 playlist URL.
            if "visionias" in url:
                async with ClientSession() as session:
                    async with session.get(url, headers={'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Accept-Language': 'en-US,en;q=0.9', 'Cache-Control': 'no-cache', 'Connection': 'keep-alive', 'Pragma': 'no-cache', 'Referer': 'http://www.visionias.in/', 'Sec-Fetch-Dest': 'iframe', 'Sec-Fetch-Mode': 'navigate', 'Sec-Fetch-Site': 'cross-site', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Linux; Android 12; RMX2121) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Mobile Safari/537.36', 'sec-ch-ua': '"Chromium";v="107", "Not=A?Brand";v="24"', 'sec-ch-ua-mobile': '?1', 'sec-ch-ua-platform': '"Android"',}) as resp:
                        text = await resp.text()
                        url = re.search(r"(https://.*?playlist.m3u8.*?)\"", text).group(1)

            # NOTE(review): `name` is only assigned further below, so on the
            # first iteration this branch would raise NameError if it matched.
            if "acecwply" in url:
                cmd = f'yt-dlp -o "{name}.%(ext)s" -f "bestvideo[height<={raw_text2}]+bestaudio" --hls-prefer-ffmpeg --no-keep-video --remux-video mkv --no-warning "{url}"'


            # NOTE(review): duplicate of the visionias resolution block above.
            if "visionias" in url:
                async with ClientSession() as session:
                    async with session.get(url, headers={'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Accept-Language': 'en-US,en;q=0.9', 'Cache-Control': 'no-cache', 'Connection': 'keep-alive', 'Pragma': 'no-cache', 'Referer': 'http://www.visionias.in/', 'Sec-Fetch-Dest': 'iframe', 'Sec-Fetch-Mode': 'navigate', 'Sec-Fetch-Site': 'cross-site', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Linux; Android 12; RMX2121) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Mobile Safari/537.36', 'sec-ch-ua': '"Chromium";v="107", "Not=A?Brand";v="24"', 'sec-ch-ua-mobile': '?1', 'sec-ch-ua-platform': '"Android"',}) as resp:
                        text = await resp.text()
                        url = re.search(r"(https://.*?playlist.m3u8.*?)\"", text).group(1)
            # Provider-specific URL rewrites (PW CDN, Classplus, Utkarsh, ...).
            if 'd1d34p8vz63oiq' in url:
                vid_id = url.split("/")[-2]
                url = f"https://dl.alphacbse.site/download/{vid_id}/master.m3u8"

            #elif 'media-cdn.classplusapp.com/drm/' in url:
                #url = f"https://www.masterapi.tech/get/cp/dl?url={url}"

            elif 'media-cdn.classplusapp.com/drm/' in url:
                url = f"https://dragoapi.vercel.app/video/{url}"


            elif 'videos.classplusapp' in url or "tencdn.classplusapp" in url or "webvideos.classplusapp.com" in url or "media-cdn-alisg.classplusapp.com" in url or "videos.classplusapp" in url or "videos.classplusapp.com" in url or "media-cdn-a.classplusapp" in url or "media-cdn.classplusapp" in url or "alisg-cdn-a.classplusapp" in url:
                url = requests.get(f'https://api.classplusapp.com/cams/uploader/video/jw-signed-url?url={url}', headers={'x-access-token': 'eyJjb3Vyc2VJZCI6IjQ1NjY4NyIsInR1dG9ySWQiOm51bGwsIm9yZ0lkIjo0ODA2MTksImNhdGVnb3J5SWQiOm51bGx9r'}).json()['url']

            elif "apps-s3-jw-prod.utkarshapp.com" in url:
                if 'enc_plain_mp4' in url:
                    url = url.replace(url.split("/")[-1], res+'.mp4')

                elif 'Key-Pair-Id' in url:
                    url = None

                elif '.m3u8' in url:
                    # Pick the second playlist variant and rebase it onto the
                    # original URL's prefix.
                    q = ((m3u8.loads(requests.get(url).text)).data['playlists'][1]['uri']).split("/")[0]
                    x = url.split("/")[5]
                    x = url.replace(x, "")
                    url = ((m3u8.loads(requests.get(url).text)).data['playlists'][1]['uri']).replace(q+"/", x)
                    if 'amazonaws.com' in url:
                        url = f"https://master-api-v3.vercel.app/adda-mp4-m3u8?url={url}&quality={raw_text2}&token={raw_text4}"

            # Build the numbered, sanitised output filename for this item.
            name1 = links[i][0].replace("\t", "").replace(":", "").replace("/", "").replace("+", "").replace("#", "").replace("|", "").replace("@", "").replace("*", "").replace(".", "").replace("https", "").replace("http", "").strip()
            name = f'{str(count).zfill(3)}) {name1[:60]} {my_name}'

            if "appx" in url and "pdf" in url:
                url = f"https://dragoapi.vercel.app/pdf/{url}"
            if "appx-recordings-mcdn.akamai.net.in/drm/" in url:
                cmd = f'ffmpeg -i "{url}" -c copy -bsf:a aac_adtstoasc "{name}.mp4"'
            elif "arvind" in url:
                cmd = f'ffmpeg -i "{url}" -c copy -bsf:a aac_adtstoasc "{name}.mp4"'
            if ".zip" in url:
                url = f"https://video.pablocoder.eu.org/appx-zip?url={url}"

            elif "https://appx-transcoded-videos.livelearn.in/videos/rozgar-data/" in url:
                url = url.replace("https://appx-transcoded-videos.livelearn.in/videos/rozgar-data/", "")
                name1 = links[i][0].replace("\t", "").replace(":", "").replace("/", "").replace("+", "").replace("#", "").replace("|", "").replace("@", "@").replace("*", "").replace(".", "").replace("https", "").replace("http", "").strip()
                name = f'{str(count).zfill(3)}) {name1[:60]}'
                cmd = f'yt-dlp -o "{name}.mp4" "{url}"'

            elif "https://appx-transcoded-videos-mcdn.akamai.net.in/videos/bhainskipathshala-data/" in url:
                url = url.replace("https://appx-transcoded-videos-mcdn.akamai.net.in/videos/bhainskipathshala-data/", "")
                name1 = links[i][0].replace("\t", "").replace(":", "").replace("/", "").replace("+", "").replace("#", "").replace("|", "").replace("@", "@").replace("*", "").replace(".", "").replace("https", "").replace("http", "").strip()
                name = f'{str(count).zfill(3)}) {name1[:60]}'
                cmd = f'yt-dlp -o "{name}.mp4" "{url}"'

            if '/do' in url:
                pdf_id = url.split("/")[-1].split(".pdf")[0]
                print(pdf_id)
                url = f"https://kgs-v2.akamaized.net/kgs/do/pdfs/{pdf_id}.pdf"

            if 'sec-prod-mediacdn.pw.live' in url:
                vid_id = url.split("sec-prod-mediacdn.pw.live/")[1].split("/")[0]
                url = f"https://pwplayer-0e2dbbdc0989.herokuapp.com/player?url=https://d1d34p8vz63oiq.cloudfront.net/{vid_id}/master.mpd?token={raw_text4}"

            if 'bitgravity.com' in url:
                parts = url.split('/')
                part1 = parts[1]
                part2 = parts[2]
                part3 = parts[3]
                part4 = parts[4]
                part5 = parts[5]
                part6 = parts[6]

                print(f"PART1: {part1}")
                print(f"PART2: {part2}")
                print(f"PART3: {part3}")
                print(f"PART4: {part4}")
                print(f"PART5: {part5}")
                print(f"PART6: {part6}")
                url = f"https://kgs-v2.akamaized.net/{part3}/{part4}/{part5}/{part6}"

            if '?list' in url:
                video_id = url.split("/embed/")[1].split("?")[0]
                print(video_id)
                url = f"https://www.youtube.com/embed/{video_id}"


            if 'workers.dev' in url:
                vid_id = url.split("cloudfront.net/")[1].split("/")[0]
                print(vid_id)
                url = f"https://madxapi-d0cbf6ac738c.herokuapp.com/{vid_id}/master.m3u8?token={raw_text4}"

            if 'psitoffers.store' in url:
                vid_id = url.split("vid=")[1].split("&")[0]
                print(f"vid_id = {vid_id}")
                url = f"https://madxapi-d0cbf6ac738c.herokuapp.com/{vid_id}/master.m3u8?token={raw_text4}"

            if "edge.api.brightcove.com" in url:
                bcov = 'bcov_auth=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJpYXQiOjE3MjQyMzg3OTEsImNvbiI6eyJpc0FkbWluIjpmYWxzZSwiYXVzZXIiOiJVMFZ6TkdGU2NuQlZjR3h5TkZwV09FYzBURGxOZHowOSIsImlkIjoiZEUxbmNuZFBNblJqVEROVmFWTlFWbXhRTkhoS2R6MDkiLCJmaXJzdF9uYW1lIjoiYVcxV05ITjVSemR6Vm10ak1WUlBSRkF5ZVNzM1VUMDkiLCJlbWFpbCI6Ik5Ga3hNVWhxUXpRNFJ6VlhiR0ppWTJoUk0wMVdNR0pVTlU5clJXSkRWbXRMTTBSU2FHRnhURTFTUlQwPSIsInBob25lIjoiVUhVMFZrOWFTbmQ1ZVcwd1pqUTViRzVSYVc5aGR6MDkiLCJhdmF0YXIiOiJLM1ZzY1M4elMwcDBRbmxrYms4M1JEbHZla05pVVQwOSIsInJlZmVycmFsX2NvZGUiOiJOalZFYzBkM1IyNTBSM3B3VUZWbVRtbHFRVXAwVVQwOSIsImRldmljZV90eXBlIjoiYW5kcm9pZCIsImRldmljZV92ZXJzaW9uIjoiUShBbmRyb2lkIDEwLjApIiwiZGV2aWNlX21vZGVsIjoiU2Ftc3VuZyBTTS1TOTE4QiIsInJlbW90ZV9hZGRyIjoiNTQuMjI2LjI1NS4xNjMsIDU0LjIyNi4yNTUuMTYzIn19.snDdd-PbaoC42OUhn5SJaEGxq0VzfdzO49WTmYgTx8ra_Lz66GySZykpd2SxIZCnrKR6-R10F5sUSrKATv1CDk9ruj_ltCjEkcRq8mAqAytDcEBp72-W0Z7DtGi8LdnY7Vd9Kpaf499P-y3-godolS_7ixClcYOnWxe2nSVD5C9c5HkyisrHTvf6NFAuQC_FD3TzByldbPVKK0ag1UnHRavX8MtttjshnRhv5gJs5DQWj4Ir_dkMcJ4JaVZO3z8j0OxVLjnmuaRBujT-1pavsr1CCzjTbAcBvdjUfvzEhObWfA1-Vl5Y4bUgRHhl1U-0hne4-5fF0aouyu71Y6W0eg'
                url = url.split("bcov_auth")[0]+bcov

            # yt-dlp format selector: cap height at the requested resolution.
            if "youtu" in url:
                ytf = f"b[height<={raw_text2}][ext=mp4]/bv[height<={raw_text2}][ext=mp4]+ba[ext=m4a]/b[ext=mp4]"
            else:
                ytf = f"b[height<={raw_text2}]/bv[height<={raw_text2}]+ba/b/bv+ba"

            # NOTE(review): this brightcove block repeats the one above with a
            # newer token, overwriting its result.
            if "edge.api.brightcove.com" in url:
                bcov = 'bcov_auth=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJpYXQiOjE3MzUxMzUzNjIsImNvbiI6eyJpc0FkbWluIjpmYWxzZSwiYXVzZXIiOiJVMFZ6TkdGU2NuQlZjR3h5TkZwV09FYzBURGxOZHowOSIsImlkIjoiYmt3cmVIWmxZMFUwVXpkSmJYUkxVemw2ZW5Oclp6MDkiLCJmaXJzdF9uYW1lIjoiY25GdVpVdG5kRzR4U25sWVNGTjRiVW94VFhaUVVUMDkiLCJlbWFpbCI6ImFFWllPRXhKYVc1NWQyTlFTazk0YmtWWWJISTNRM3BKZW1OUVdIWXJWWE0wWldFNVIzZFNLelE0ZHowPSIsInBob25lIjoiZFhSNlFrSm9XVlpCYkN0clRUWTFOR3REU3pKTVVUMDkiLCJhdmF0YXIiOiJLM1ZzY1M4elMwcDBRbmxrYms4M1JEbHZla05pVVQwOSIsInJlZmVycmFsX2NvZGUiOiJhVVZGZGpBMk9XSnhlbXRZWm14amF6TTBVazQxUVQwOSIsImRldmljZV90eXBlIjoid2ViIiwiZGV2aWNlX3ZlcnNpb24iOiJDaHJvbWUrMTE5IiwiZGV2aWNlX21vZGVsIjoiY2hyb21lIiwicmVtb3RlX2FkZHIiOiIyNDA5OjQwYzI6MjA1NTo5MGQ0OjYzYmM6YTNjOTozMzBiOmIxOTkifX0.Kifitj1wCe_ohkdclvUt7WGuVBsQFiz7eezXoF1RduDJi4X7egejZlLZ0GCZmEKBwQpMJLvrdbAFIRniZoeAxL4FZ-pqIoYhH3PgZU6gWzKz5pdOCWfifnIzT5b3rzhDuG7sstfNiuNk9f-HMBievswEIPUC_ElazXdZPPt1gQqP7TmVg2Hjj6-JBcG7YPSqa6CUoXNDHpjWxK_KREnjWLM7vQ6J3vF1b7z_S3_CFti167C6UK5qb_turLnOUQzWzcwEaPGB3WXO0DAri6651WF33vzuzeclrcaQcMjum8n7VQ0Cl3fqypjaWD30btHQsu5j8j3pySWUlbyPVDOk-g'
                url = url.split("bcov_auth")[0]+bcov

            # Build the download command for the generic (video) path.
            if "jw-prod" in url:
                cmd = f'yt-dlp -o "{name}.mp4" "{url}"'

            elif "webvideos.classplusapp." in url:
                cmd = f'yt-dlp --add-header "referer:https://web.classplusapp.com/" --add-header "x-cdn-tag:empty" -f "{ytf}" "{url}" -o "{name}.mp4"'

            elif "youtube.com" in url or "youtu.be" in url:
                cmd = f'yt-dlp --cookies youtube_cookies.txt -f "{ytf}" "{url}" -o "{name}".mp4'

            else:
                cmd = f'yt-dlp -f "{ytf}" "{url}" -o "{name}.mp4"'
            # NOTE(review): the ytf/cmd selection below duplicates the blocks
            # just above (minus the webvideos branch) and overwrites them.
            if "youtu" in url:
                ytf = f"b[height<={raw_text2}][ext=mp4]/bv[height<={raw_text2}][ext=mp4]+ba[ext=m4a]/b[ext=mp4]"
            else:
                ytf = f"b[height<={raw_text2}]/bv[height<={raw_text2}]+ba/b/bv+ba"
            if "jw-prod" in url:
                cmd = f'yt-dlp -o "{name}.mp4" "{url}"'

            elif "youtube.com" in url or "youtu.be" in url:
                cmd = f'yt-dlp --cookies youtube_cookies.txt -f "{ytf}" "{url}" -o "{name}".mp4'
            else:
                cmd = f'yt-dlp -f "{ytf}" "{url}" -o "{name}.mp4"'

            try:

                # Caption templates for each media type.
                cc = f'**🎞️ VID_ID: {str(count).zfill(3)}.\n\n📝 Title: {name1} {my_name} {res}.mkv\n\n📚 Batch Name: {b_name}\n\n📥 Extracted By : {CR}\n\n**━━━━━✦{my_name}✦━━━━━**'
                cc1 = f'**📁 PDF_ID: {str(count).zfill(3)}.\n\n📝 Title: {name1} {my_name}.pdf\n\n📚 Batch Name: {b_name}\n\n📥 Extracted By : {CR}\n\n**━━━━━✦{my_name}✦━━━━━**'
                cyt = f'**🎞️ VID_ID: {str(count).zfill(3)}.\n\n📝 Title: {name1} {my_name}.mkv\n\n📚 Batch Name: {b_name}\n\n**🔗 𝐕𝐢𝐝𝐞𝐨 𝐥𝐢𝐧𝐤 - ({url})**\n\n📥 Extracted By : {CR}\n\n**━━━━━✦{my_name}✦━━━━━**'
                ccp = f'**🎞️ VID_ID: {str(count).zfill(3)}.\n\n📝 Title: {name1} {my_name}.mkv\n\n📚 Batch Name: {b_name}\n\n**🔗 𝐕𝐢𝐝𝐞𝐨 𝐥𝐢𝐧𝐤 - ({url})**\n\n📥 Extracted By : {CR}\n\n**━━━━━✦{my_name}✦━━━━━**'
                czip = f'**🎞️ VID_ID: {str(count).zfill(3)}.\n\n📝 Title: {name1} {my_name}.mkv\n\n📚 Batch Name: {b_name}\n\n**🔗 𝐕𝐢𝐝𝐞𝐨 𝐥𝐢𝐧𝐤 - ({url})**\n\n📥 Extracted By : {CR}\n\n**━━━━━✦{my_name}✦━━━━━**'
                cczip = f'**💾 ZIP_ID: {str(count).zfill(3)}.\n\n📝 Title: {name1} {my_name}.pdf\n\n📚 Batch Name: {b_name}\n\n📥 Extracted By : {CR}\n\n**━━━━━✦{my_name}✦━━━━━**'


                # Dispatch: each branch downloads/sends one item, bumps `count`,
                # and on FloodWait sleeps out the penalty before continuing.
                if "drive" in url:
                    try:
                        ka = await helper.download(url, name)
                        copy = await bot.send_document(chat_id=m.chat.id,document=ka, caption=cc1)
                        count+=1
                        os.remove(ka)
                        time.sleep(1)
                    except FloodWait as e:
                        await m.reply_text(str(e))
                        time.sleep(e.x)
                        continue

                elif ".pdf" in url:
                    try:
                        await asyncio.sleep(4)
                        # Replace spaces with %20 in the URL
                        url = url.replace(" ", "%20")

                        # Create a cloudscraper session
                        scraper = cloudscraper.create_scraper()

                        # Send a GET request to download the PDF
                        response = scraper.get(url)

                        # Check if the response status is OK
                        if response.status_code == 200:
                            # Write the PDF content to a file
                            with open(f'{name}.pdf', 'wb') as file:
                                file.write(response.content)

                            # Send the PDF document
                            await asyncio.sleep(4)
                            copy = await bot.send_document(chat_id=m.chat.id, document=f'{name}.pdf', caption=cc1)
                            count += 1

                            # Remove the PDF file after sending
                            os.remove(f'{name}.pdf')
                        else:
                            await m.reply_text(f"Failed to download PDF: {response.status_code} {response.reason}")

                    except FloodWait as e:
                        await m.reply_text(str(e))
                        time.sleep(e.x)
                        continue

                elif "*--appx-pdf" in url or "*--appx-pdf?key=" in url:
                    try:
                        # Extract key and clean URL
                        if "*--appx-pdf?key=" in url:
                            url, key = url.split('*--appx-pdf?key=')
                            key = key.strip()
                        elif "*--appx-pdf" in url:
                            url, key = url.split('*--appx-pdf')
                            key = key.strip()
                        else:
                            url, key = url.split('*')
                            key = key.strip()

                        if not key:
                            raise ValueError("Decryption key is empty")

                        print(f"Processing PDF - URL: {url}\nKey: {key}")

                        # Download PDF
                        cmd = f'yt-dlp -o "{name}.pdf" "{url}"'
                        download_cmd = f"{cmd} -R 25 --fragment-retries 25"
                        os.system(download_cmd)

                        pdf_path = f'{name}.pdf'

                        if not os.path.exists(pdf_path):
                            raise FileNotFoundError("PDF download failed")

                        print(f"PDF downloaded successfully to {pdf_path}")
                        file_size = os.path.getsize(pdf_path)
                        print(f"PDF size: {file_size} bytes")

                        # Decrypt PDF: XOR the first (up to) 28 bytes with the
                        # key, falling back to the byte index past key length.
                        with open(pdf_path, "r+b") as file:
                            try:
                                mmapped_file = mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_WRITE)
                                decrypt_size = min(file_size, 28)

                                for i in range(decrypt_size):
                                    current_byte = mmapped_file[i]
                                    if i < len(key):
                                        mmapped_file[i] = current_byte ^ ord(key[i])
                                    else:
                                        mmapped_file[i] = current_byte ^ i

                                mmapped_file.flush()
                                mmapped_file.close()
                                print("PDF decryption completed")
                            except Exception as e:
                                raise Exception(f"Decryption failed: {str(e)}")

                        # Send file
                        await bot.send_document(chat_id=m.chat.id, document=pdf_path, caption=cc1)
                        count += 1
                        print("PDF sent successfully")

                    except Exception as e:
                        error_msg = f"PDF processing failed: {str(e)}"
                        print(error_msg)
                        await m.reply_text(error_msg)
                        continue
                    finally:
                        # Cleanup
                        if 'pdf_path' in locals() and os.path.exists(pdf_path):
                            os.remove(pdf_path)
                            print("Temporary PDF file removed")
                        time.sleep(5)

                elif "zip" in url:
                    try:
                        await bot.send_photo(chat_id=m.chat.id, photo=appxzip, caption=czip)
                        count +=1
                    except Exception as e:
                        await m.reply_text(str(e))
                        time.sleep(1)
                        continue

                elif "youtu" in url:
                    try:
                        await bot.send_photo(chat_id=m.chat.id, photo=photo, caption=cyt)
                        count +=1
                    except Exception as e:
                        await m.reply_text(str(e))
                        time.sleep(1)
                        continue

                elif "media-cdn.classplusapp.com/drm/" in url:
                    try:
                        await bot.send_photo(chat_id=m.chat.id, photo=cpphoto, caption=ccp)
                        count +=1
                    except Exception as e:
                        await m.reply_text(str(e))
                        time.sleep(1)
                        continue
                elif any(ext in url for ext in [".jpg", ".jpeg", ".png"]):
                    try:
                        ext = url.split('.')[-1]
                        cmd = f'yt-dlp -o "{name1}.{ext}" "{url}"'
                        download_cmd = f"{cmd} -R 25 --fragment-retries 25"
                        os.system(download_cmd)
                        cc3 = f'**🖼️ IMG_ID: {str(count).zfill(3)}.\n\n📝 Title: {name1} {my_name}.{ext}\n\n📚 Batch Name: {b_name}\n\n📥 Extracted By : {CR}\n\n**━━━━━✦{my_name}✦━━━━━**'
                        await bot.send_document(chat_id=m.chat.id, document=f'{name}.{ext}', caption=cc3)
                        count += 1
                        os.remove(f'{name}.{ext}')
                        time.sleep(3)
                    except FloodWait as e:
                        await m.reply_text(str(e))
                        time.sleep(e.x)
                        continue
                elif any(ext in url for ext in [".mp3", ".wav", ".m4a"]):
                    try:
                        ext = url.split('.')[-1]
                        cmd = f'yt-dlp -x --audio-format {ext} -o "{name}.{ext}" "{url}"'
                        download_cmd = f"{cmd} -R 25 --fragment-retries 25"
                        os.system(download_cmd)
                        cc2 = f'**🎵 MP3_ID: {str(count).zfill(3)}.\n\n📝 Title: {name1} {my_name}.{ext}\n\n📚 Batch Name: {b_name}\n\n📥 Extracted By : {CR}\n\n**━━━━━✦{my_name}✦━━━━━**'
                        await bot.send_document(chat_id=m.chat.id, document=f'{name}.{ext}', caption=cc2)
                        count += 1
                        os.remove(f'{name}.{ext}')
                    except FloodWait as e:
                        await m.reply_text(str(e))
                        time.sleep(e.x)
                        continue

                elif ".ws" in url:
                    try:
                        html_filename = f"{name}.html"
                        helper.download_html_file(url, html_filename)
                        # NOTE(review): {ext} here is stale — last set either by
                        # os.path.splitext at the top or an earlier branch.
                        cc5 = f'**🌐 HTML_ID: {str(count).zfill(3)}.\n\n📝 Title: {name1} {my_name}.{ext}\n\n📚 Batch Name: {b_name}\n\n📥 Extracted By : {CR}\n\n**━━━━━✦{my_name}✦━━━━━**'
                        copy = await bot.send_document(chat_id=m.chat.id, document=html_filename, caption=cc5)
                        # Clean up files
                        os.remove(html_filename)
                        count += 1
                        time.sleep(3)
                    except Exception as e:
                        await m.reply_text(str(e))
                        time.sleep(5)
                        continue

                # NOTE(review): unreachable — duplicates the image branch above,
                # which already matched these extensions.
                elif any(ext in url for ext in [".jpg", ".jpeg", ".png"]):
                    try:
                        ext = url.split('.')[-1]; cmd = f'yt-dlp -o "{name1}.{ext}" "{url}"'; download_cmd = f"{cmd} -R 25 --fragment-retries 25"; os.system(download_cmd); cc3 = f'**🖼️ IMG_ID: {str(count).zfill(3)}.\n\n📝 Title: {name1} {my_name}.{ext}\n\n📚 Batch Name: {b_name}\n\n📥 Extracted By : {CR}\n\n**━━━━━✦{my_name}✦━━━━━**'; await bot.send_document(chat_id=m.chat.id, document=f'{name}.{ext}', caption=cc3); count += 1; os.remove(f'{name}.{ext}'); time.sleep(3)
                    except FloodWait as e: await m.reply_text(str(e)); time.sleep(e.x); continue

                # NOTE(review): unreachable — ".pdf" URLs are consumed by the
                # cloudscraper branch near the top of this chain.
                elif ".pdf" in url:

                    try:
                        cmd = f'yt-dlp -o "{name}.pdf" "{url}"'
                        download_cmd = f"{cmd} -R 25 --fragment-retries 25"
                        os.system(download_cmd)
                        copy = await bot.send_document(chat_id=m.chat.id, document=f'{name1}.pdf', caption=cc1)
                        count +=1
                        os.remove(f'{name}.pdf')
                    except FloodWait as e:
                        await m.reply_text(str(e))
                        time.sleep(e.x)
                        continue

                # NOTE(review): unreachable for the same reason as above.
                elif ".pdf" in url:
                    try:
                        cmd = f'yt-dlp -o "{name}.pdf" "{url}"'
                        download_cmd = f"{cmd} -R 25 --fragment-retries 25"
                        os.system(download_cmd)
                        copy = await bot.send_document(chat_id=m.chat.id, document=f'{name}.pdf', caption=cc1)
                        count += 1
                        os.remove(f'{name}.pdf')
                    except FloodWait as e:
                        await m.reply_text(str(e))
                        time.sleep(e.x)
                        continue

                else:
                    # Generic video path: run the yt-dlp/ffmpeg `cmd` built
                    # above, then upload the result with the video caption.
                    Show = f"📥 𝐃𝐨𝐰𝐧𝐥𝐨𝐚𝐝𝐢𝐧𝐠 »\n\n📝 Title:- `{name}\n\n**🔗 𝐓𝐨𝐭𝐚𝐥 𝐔𝐑𝐋 »** ✨{len(links)}✨\n\n⌨ 𝐐𝐮𝐚𝐥𝐢𝐭𝐲 » {raw_text2}`\n\n**𝐁𝐨𝐭 𝐌𝐚𝐝𝐞 𝐁𝐲 ✦ 𝗦𝗣𝗜𝗗𝗬"
                    prog = await m.reply_text(Show)
                    res_file = await helper.download_video(url, cmd, name)
                    filename = res_file
                    await prog.delete(True)
                    await helper.send_vid(bot, m, cc, filename, thumb, name, prog)
                    count += 1
                    time.sleep(1)

            except Exception as e:
                # Per-item failure: report and move on to the next link.
                await m.reply_text(
                    f"⌘ 𝐃𝐨𝐰𝐧𝐥𝐨𝐚𝐝𝐢𝐧𝐠 𝐈𝐧𝐭𝐞𝐫𝐮𝐩𝐭𝐞𝐝 ❌ \n\n⌘ 𝐍𝐚𝐦𝐞 » {name}\n⌘ 𝐋𝐢𝐧𝐤 » `{url}`"
                )
                continue

    except Exception as e:
        await m.reply_text(e)
    await m.reply_text("**✅ 𝐒𝐮𝐜𝐜𝐞𝐬𝐬𝐟𝐮𝐥𝐥𝐲 𝐃𝐨𝐧𝐞**")
685 |
686 | # Advanced variant: the /bravo handler below duplicates /king with provider-specific tweaks.
687 |
688 | @bot.on_message(filters.command(["bravo"]) )
689 | async def txt_handler(bot: Client, m: Message):
690 | editable = await m.reply_text(f"**📁Send me the TXT file and wait.**")
691 | input: Message = await bot.listen(editable.chat.id)
692 | x = await input.download()
693 | await input.delete(True)
694 | file_name, ext = os.path.splitext(os.path.basename(x))
695 | credit = f"𝗦𝗣𝗜𝗗𝗬™🇮🇳"
696 |
697 | try:
698 | with open(x, "r") as f:
699 | content = f.read()
700 | content = content.split("\n")
701 | links = []
702 | for i in content:
703 | links.append(i.split("://", 1))
704 | os.remove(x)
705 | except:
706 | await m.reply_text("Invalid file input.")
707 | os.remove(x)
708 | return
709 |
710 | await editable.edit(f"Total links found are **{len(links)}**\n\nSend From where you want to download initial is **1**")
711 | input0: Message = await bot.listen(editable.chat.id)
712 | raw_text = input0.text
713 | await input0.delete(True)
714 | try:
715 | arg = int(raw_text)
716 | except:
717 | arg = 1
718 | await editable.edit("**Enter Your Batch Name or send d for grabing from text filename.**")
719 | input1: Message = await bot.listen(editable.chat.id)
720 | raw_text0 = input1.text
721 | await input1.delete(True)
722 | if raw_text0 == 'd':
723 | b_name = file_name
724 | else:
725 | b_name = raw_text0
726 |
727 | await editable.edit("**Enter resolution.\n Eg : 480 or 720**")
728 | input2: Message = await bot.listen(editable.chat.id)
729 | raw_text2 = input2.text
730 | await input2.delete(True)
731 | try:
732 | if raw_text2 == "144":
733 | res = "144x256"
734 | elif raw_text2 == "240":
735 | res = "240x426"
736 | elif raw_text2 == "360":
737 | res = "360x640"
738 | elif raw_text2 == "480":
739 | res = "480x854"
740 | elif raw_text2 == "720":
741 | res = "720x1280"
742 | elif raw_text2 == "1080":
743 | res = "1080x1920"
744 | else:
745 | res = "UN"
746 | except Exception:
747 | res = "UN"
748 |
749 | await editable.edit("**Enter Your Name or send 'de' for use default.\n Eg : 𝗦𝗣𝗜𝗗𝗬™👨🏻💻**")
750 | input3: Message = await bot.listen(editable.chat.id)
751 | raw_text3 = input3.text
752 | await input3.delete(True)
753 | if raw_text3 == 'de':
754 | CR = credit
755 | else:
756 | CR = raw_text3
757 |
758 |
759 | await editable.edit("Now send the **Thumb url**\n**Eg :** ``\n\nor Send `no`")
760 | input6 = message = await bot.listen(editable.chat.id)
761 | raw_text6 = input6.text
762 | await input6.delete(True)
763 | await editable.delete()
764 |
765 | thumb = input6.text
766 | if thumb.startswith("http://") or thumb.startswith("https://"):
767 | getstatusoutput(f"wget '{thumb}' -O 'thumb.jpg'")
768 | thumb = "thumb.jpg"
769 | else:
770 | thumb == "no"
771 |
772 | count =int(raw_text)
773 | try:
774 | for i in range(arg-1, len(links)):
775 |
776 | Vxy = links[i][1].replace("file/d/","uc?export=download&id=").replace("www.youtube-nocookie.com/embed", "youtu.be").replace("?modestbranding=1", "").replace("/view?usp=sharing","")
777 | url = "https://" + Vxy
778 | if "visionias" in url:
779 | async with ClientSession() as session:
780 | async with session.get(url, headers={'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Accept-Language': 'en-US,en;q=0.9', 'Cache-Control': 'no-cache', 'Connection': 'keep-alive', 'Pragma': 'no-cache', 'Referer': 'http://www.visionias.in/', 'Sec-Fetch-Dest': 'iframe', 'Sec-Fetch-Mode': 'navigate', 'Sec-Fetch-Site': 'cross-site', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Linux; Android 12; RMX2121) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Mobile Safari/537.36', 'sec-ch-ua': '"Chromium";v="107", "Not=A?Brand";v="24"', 'sec-ch-ua-mobile': '?1', 'sec-ch-ua-platform': '"Android"',}) as resp:
781 | text = await resp.text()
782 | url = re.search(r"(https://.*?playlist.m3u8.*?)\"", text).group(1)
783 |
784 | if "acecwply" in url:
785 | cmd = f'yt-dlp -o "{name}.%(ext)s" -f "bestvideo[height<={raw_text2}]+bestaudio" --hls-prefer-ffmpeg --no-keep-video --remux-video mkv --no-warning "{url}"'
786 |
787 |
788 | if "visionias" in url:
789 | async with ClientSession() as session:
790 | async with session.get(url, headers={'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Accept-Language': 'en-US,en;q=0.9', 'Cache-Control': 'no-cache', 'Connection': 'keep-alive', 'Pragma': 'no-cache', 'Referer': 'http://www.visionias.in/', 'Sec-Fetch-Dest': 'iframe', 'Sec-Fetch-Mode': 'navigate', 'Sec-Fetch-Site': 'cross-site', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Linux; Android 12; RMX2121) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Mobile Safari/537.36', 'sec-ch-ua': '"Chromium";v="107", "Not=A?Brand";v="24"', 'sec-ch-ua-mobile': '?1', 'sec-ch-ua-platform': '"Android"',}) as resp:
791 | text = await resp.text()
792 | url = re.search(r"(https://.*?playlist.m3u8.*?)\"", text).group(1)
793 |
794 | elif 'videos.classplusapp' in url or "tencdn.classplusapp" in url or "webvideos.classplusapp.com" in url or "media-cdn-alisg.classplusapp.com" in url or "videos.classplusapp" in url or "videos.classplusapp.com" in url or "media-cdn-a.classplusapp" in url or "media-cdn.classplusapp" in url or "alisg-cdn-a.classplusapp" in url:
795 | url = requests.get(f'https://api.classplusapp.com/cams/uploader/video/jw-signed-url?url={url}', headers={'x-access-token': 'eyJjb3Vyc2VJZCI6IjQ1NjY4NyIsInR1dG9ySWQiOm51bGwsIm9yZ0lkIjo0ODA2MTksImNhdGVnb3J5SWQiOm51bGx9r'}).json()['url']
796 |
797 | elif "apps-s3-jw-prod.utkarshapp.com" in url:
798 | if 'enc_plain_mp4' in url:
799 | url = url.replace(url.split("/")[-1], res+'.mp4')
800 |
801 | elif 'Key-Pair-Id' in url:
802 | url = None
803 |
804 | elif '.m3u8' in url:
805 | q = ((m3u8.loads(requests.get(url).text)).data['playlists'][1]['uri']).split("/")[0]
806 | x = url.split("/")[5]
807 | x = url.replace(x, "")
808 | url = ((m3u8.loads(requests.get(url).text)).data['playlists'][1]['uri']).replace(q+"/", x)
809 |
810 |
811 |
812 | elif '/master.mpd' in url:
813 | vid_id = url.split("/")[-2]
814 | url = f"https://pw-links-api.onrender.com/process?v=https://sec1.pw.live/{vid_id}/master.mpd&quality={raw_text2}"
815 |
816 | name1 = links[i][0].replace("\t", "").replace(":", "").replace("/", "").replace("+", "").replace("#", "").replace("|", "").replace("@", "").replace("*", "").replace(".", "").replace("https", "").replace("http", "").strip()
817 | name = f'{str(count).zfill(3)}) {name1[:60]} {my_name}'
818 |
819 |
820 | if "edge.api.brightcove.com" in url:
821 | bcov = 'bcov_auth=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJpYXQiOjE3MjQyMzg3OTEsImNvbiI6eyJpc0FkbWluIjpmYWxzZSwiYXVzZXIiOiJVMFZ6TkdGU2NuQlZjR3h5TkZwV09FYzBURGxOZHowOSIsImlkIjoiZEUxbmNuZFBNblJqVEROVmFWTlFWbXhRTkhoS2R6MDkiLCJmaXJzdF9uYW1lIjoiYVcxV05ITjVSemR6Vm10ak1WUlBSRkF5ZVNzM1VUMDkiLCJlbWFpbCI6Ik5Ga3hNVWhxUXpRNFJ6VlhiR0ppWTJoUk0wMVdNR0pVTlU5clJXSkRWbXRMTTBSU2FHRnhURTFTUlQwPSIsInBob25lIjoiVUhVMFZrOWFTbmQ1ZVcwd1pqUTViRzVSYVc5aGR6MDkiLCJhdmF0YXIiOiJLM1ZzY1M4elMwcDBRbmxrYms4M1JEbHZla05pVVQwOSIsInJlZmVycmFsX2NvZGUiOiJOalZFYzBkM1IyNTBSM3B3VUZWbVRtbHFRVXAwVVQwOSIsImRldmljZV90eXBlIjoiYW5kcm9pZCIsImRldmljZV92ZXJzaW9uIjoiUShBbmRyb2lkIDEwLjApIiwiZGV2aWNlX21vZGVsIjoiU2Ftc3VuZyBTTS1TOTE4QiIsInJlbW90ZV9hZGRyIjoiNTQuMjI2LjI1NS4xNjMsIDU0LjIyNi4yNTUuMTYzIn19.snDdd-PbaoC42OUhn5SJaEGxq0VzfdzO49WTmYgTx8ra_Lz66GySZykpd2SxIZCnrKR6-R10F5sUSrKATv1CDk9ruj_ltCjEkcRq8mAqAytDcEBp72-W0Z7DtGi8LdnY7Vd9Kpaf499P-y3-godolS_7ixClcYOnWxe2nSVD5C9c5HkyisrHTvf6NFAuQC_FD3TzByldbPVKK0ag1UnHRavX8MtttjshnRhv5gJs5DQWj4Ir_dkMcJ4JaVZO3z8j0OxVLjnmuaRBujT-1pavsr1CCzjTbAcBvdjUfvzEhObWfA1-Vl5Y4bUgRHhl1U-0hne4-5fF0aouyu71Y6W0eg'
822 | url = url.split("bcov_auth")[0]+bcov
823 |
824 |
825 | if 'sec-prod-mediacdn.pw.live' in url:
826 | vid_id = url.split("sec-prod-mediacdn.pw.live/")[1].split("/")[0]
827 | url = f"https://pwplayer-0e2dbbdc0989.herokuapp.com/player?url=https://d1d34p8vz63oiq.cloudfront.net/{vid_id}/master.mpd?token={raw_text4}"
828 |
829 | if '?list' in url:
830 | video_id = url.split("/embed/")[1].split("?")[0]
831 | print(video_id)
832 | url = f"https://www.youtube.com/embed/{video_id}"
833 |
834 | if "youtu" in url:
835 | ytf = f"b[height<={raw_text2}][ext=mp4]/bv[height<={raw_text2}][ext=mp4]+ba[ext=m4a]/b[ext=mp4]"
836 | else:
837 | ytf = f"b[height<={raw_text2}]/bv[height<={raw_text2}]+ba/b/bv+ba"
838 |
839 | if "jw-prod" in url:
840 | cmd = f'yt-dlp -o "{name}.mp4" "{url}"'
841 |
842 | elif "youtube.com" in url or "youtu.be" in url:
843 | cmd = f'yt-dlp --cookies youtube_cookies.txt -f "{ytf}" "{url}" -o "{name}".mp4'
844 |
845 | else:
846 | cmd = f'yt-dlp -f "{ytf}" "{url}" -o "{name}.mp4"'
847 |
848 | try:
849 |
850 | cc = f'**🎞️ VID_ID: {str(count).zfill(3)}.\n\nTitle: {name1} @Spidy_Universe {res}.mkv\n\n📚 Batch Name: {b_name}\n\n📥 Extracted By : {CR}\n\n**━━━━━✦📖🇮🇳📖✦━━━━━**'
851 | cc1 = f'**📁 PDF_ID: {str(count).zfill(3)}.\n\nTitle: {name1} @Spidy_Universe.pdf\n\n📚 Batch Name: {b_name}\n\n📥 Extracted By : {CR}\n\n**━━━━━✦📖🇮🇳📖✦━━━━━**'
852 |
853 |
854 | if "drive" in url:
855 | try:
856 | ka = await helper.download(url, name)
857 | copy = await bot.send_document(chat_id=m.chat.id,document=ka, caption=cc1)
858 | count+=1
859 | os.remove(ka)
860 | time.sleep(1)
861 | except FloodWait as e:
862 | await m.reply_text(str(e))
863 | time.sleep(e.x)
864 | continue
865 |
866 | elif ".pdf" in url:
867 | try:
868 | await asyncio.sleep(4)
869 | # Replace spaces with %20 in the URL
870 | url = url.replace(" ", "%20")
871 |
872 | # Create a cloudscraper session
873 | scraper = cloudscraper.create_scraper()
874 |
875 | # Send a GET request to download the PDF
876 | response = scraper.get(url)
877 |
878 | # Check if the response status is OK
879 | if response.status_code == 200:
880 | # Write the PDF content to a file
881 | with open(f'{name}.pdf', 'wb') as file:
882 | file.write(response.content)
883 |
884 | # Send the PDF document
885 | await asyncio.sleep(4)
886 | copy = await bot.send_document(chat_id=m.chat.id, document=f'{name}.pdf', caption=cc1)
887 | count += 1
888 |
889 | # Remove the PDF file after sending
890 | os.remove(f'{name}.pdf')
891 | else:
892 | await m.reply_text(f"Failed to download PDF: {response.status_code} {response.reason}")
893 |
894 | except FloodWait as e:
895 | await m.reply_text(str(e))
896 | time.sleep(e.x)
897 | continue
898 |
899 | elif ".pdf" in url:
900 | try:
901 | cmd = f'yt-dlp -o "{name}.pdf" "{url}"'
902 | download_cmd = f"{cmd} -R 25 --fragment-retries 25"
903 | os.system(download_cmd)
904 | copy = await bot.send_document(chat_id=m.chat.id, document=f'{name}.pdf', caption=cc1)
905 | count += 1
906 | os.remove(f'{name}.pdf')
907 | except FloodWait as e:
908 | await m.reply_text(str(e))
909 | time.sleep(e.x)
910 | continue
911 |
912 | else:
913 | Show = f"📥 𝐃𝐨𝐰𝐧𝐥𝐨𝐚𝐝𝐢𝐧𝐠 »\n\n📝 Title:- `{name}\n\n**🔗 𝐓𝐨𝐭𝐚𝐥 𝐔𝐑𝐋 »** ✨{len(links)}✨\n\n⌨ 𝐐𝐮𝐥𝐢𝐭𝐲 » {raw_text2}`\n\n**🔗 𝐔𝐑𝐋 »** `{url}`\n\n**𝐁𝐨𝐭 𝐌𝐚𝐝𝐞 𝐁𝐲 ✦ 🅂🄿🄸🄳🅈"
914 | prog = await m.reply_text(Show)
915 | res_file = await helper.download_video(url, cmd, name)
916 | filename = res_file
917 | await prog.delete(True)
918 | await helper.send_vid(bot, m, cc, filename, thumb, name, prog)
919 | count += 1
920 | time.sleep(1)
921 |
922 | except Exception as e:
923 | await m.reply_text(
924 | f"⌘ 𝐃𝐨𝐰𝐧𝐥𝐨𝐚𝐝𝐢𝐧𝐠 𝐈𝐧𝐭𝐞𝐫𝐮𝐩𝐭𝐞𝐝\n\n⌘ 𝐍𝐚𝐦𝐞 » {name}\n⌘ 𝐋𝐢𝐧𝐤 » `{url}`"
925 | )
926 | continue
927 |
928 | except Exception as e:
929 | await m.reply_text(e)
930 | await m.reply_text("🔰Done🔰")
931 |
932 | # Ankitshakya
933 |
@bot.on_message(filters.command(["spidy"]))
async def txt_handler(bot: Client, m: Message):
    """/spidy: bulk downloader.

    Interactively collects a TXT file whose lines look like ``Name:https://url``,
    a start index, batch name, resolution, credit name and thumbnail, then
    normalises each link per-site, downloads it (yt-dlp / cloudscraper /
    helper.download) and uploads the result back to the chat.

    NOTE(review): module-level names `my_name` and `helper` are assumed to be
    defined earlier in this file — confirm before moving this handler.
    """
    editable = await m.reply_text(f"**🔹Send me the TXT file and wait.**")
    input: Message = await bot.listen(editable.chat.id)
    x = await input.download()
    await input.delete(True)
    file_name, ext = os.path.splitext(os.path.basename(x))
    credit = f"𝗦𝗣𝗜𝗗𝗬™🇮🇳"
    # Parse "name://url" pairs, one per line: links[i] == [name, rest-of-url].
    try:
        with open(x, "r") as f:
            content = f.read()
            content = content.split("\n")
            links = []
            for i in content:
                links.append(i.split("://", 1))
        os.remove(x)
    except:
        await m.reply_text("Invalid file input.")
        os.remove(x)
        return

    await editable.edit(f"Total links found are **{len(links)}**\n\nSend From where you want to download initial is **1**")
    input0: Message = await bot.listen(editable.chat.id)
    raw_text = input0.text
    await input0.delete(True)
    try:
        arg = int(raw_text)
    except:
        arg = 1
    await editable.edit("**Enter Your Batch Name or send d for grabing from text filename.**")
    input1: Message = await bot.listen(editable.chat.id)
    raw_text0 = input1.text
    await input1.delete(True)
    if raw_text0 == 'd':
        b_name = file_name
    else:
        b_name = raw_text0

    await editable.edit("**Enter resolution.\n Eg : 480 or 720**")
    input2: Message = await bot.listen(editable.chat.id)
    raw_text2 = input2.text
    await input2.delete(True)
    # Map the requested height to the WxH label used in captions/filenames.
    try:
        if raw_text2 == "144":
            res = "144x256"
        elif raw_text2 == "240":
            res = "240x426"
        elif raw_text2 == "360":
            res = "360x640"
        elif raw_text2 == "480":
            res = "480x854"
        elif raw_text2 == "720":
            res = "720x1280"
        elif raw_text2 == "1080":
            res = "1080x1920"
        else:
            res = "UN"
    except Exception:
        res = "UN"

    await editable.edit("**Enter Your Name or send 'de' for use default.\n Eg : 𝗦𝗣𝗜𝗗𝗬™👨🏻💻**")
    input3: Message = await bot.listen(editable.chat.id)
    raw_text3 = input3.text
    await input3.delete(True)
    if raw_text3 == 'de':
        CR = credit
    else:
        CR = raw_text3

    await editable.edit("Now send the **Thumb url**\n**Eg :** ``\n\nor Send `no`")
    input6 = message = await bot.listen(editable.chat.id)
    raw_text6 = input6.text
    await input6.delete(True)
    await editable.delete()

    thumb = input6.text
    if thumb.startswith("http://") or thumb.startswith("https://"):
        getstatusoutput(f"wget '{thumb}' -O 'thumb.jpg'")
        thumb = "thumb.jpg"
    else:
        # BUG FIX: was `thumb == "no"` — a no-op comparison that left any
        # non-URL reply as a bogus thumbnail path. Assign instead.
        thumb = "no"

    count = int(raw_text)
    # BUG FIX: this handler never asks for a PW token, but the
    # sec-prod-mediacdn.pw.live branch below formats `raw_text4` into a
    # player URL; without a default a single pw.live link raised NameError
    # and aborted the whole batch.
    raw_text4 = "unknown"
    try:
        for i in range(arg-1, len(links)):

            # Normalise the raw link: Drive share links -> direct download,
            # nocookie embeds -> youtu.be, strip tracking suffixes.
            Vxy = links[i][1].replace("file/d/","uc?export=download&id=").replace("www.youtube-nocookie.com/embed", "youtu.be").replace("?modestbranding=1", "").replace("/view?usp=sharing","")
            url = "https://" + Vxy
            # visionias pages embed the real playlist URL inside the HTML.
            if "visionias" in url:
                async with ClientSession() as session:
                    async with session.get(url, headers={'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Accept-Language': 'en-US,en;q=0.9', 'Cache-Control': 'no-cache', 'Connection': 'keep-alive', 'Pragma': 'no-cache', 'Referer': 'http://www.visionias.in/', 'Sec-Fetch-Dest': 'iframe', 'Sec-Fetch-Mode': 'navigate', 'Sec-Fetch-Site': 'cross-site', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Linux; Android 12; RMX2121) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Mobile Safari/537.36', 'sec-ch-ua': '"Chromium";v="107", "Not=A?Brand";v="24"', 'sec-ch-ua-mobile': '?1', 'sec-ch-ua-platform': '"Android"',}) as resp:
                        text = await resp.text()
                        url = re.search(r"(https://.*?playlist.m3u8.*?)\"", text).group(1)

            # NOTE: a former `if "acecwply" in url:` branch was removed here —
            # it referenced `name` before assignment (NameError on the first
            # iteration) and its `cmd` was always overwritten by the
            # downloader selection below, so it had no effect.

            # Site-specific URL rewrites (mutually exclusive). The repeated
            # visionias fetch is kept because the elif-chain hangs off it.
            if "visionias" in url:
                async with ClientSession() as session:
                    async with session.get(url, headers={'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Accept-Language': 'en-US,en;q=0.9', 'Cache-Control': 'no-cache', 'Connection': 'keep-alive', 'Pragma': 'no-cache', 'Referer': 'http://www.visionias.in/', 'Sec-Fetch-Dest': 'iframe', 'Sec-Fetch-Mode': 'navigate', 'Sec-Fetch-Site': 'cross-site', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Linux; Android 12; RMX2121) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Mobile Safari/537.36', 'sec-ch-ua': '"Chromium";v="107", "Not=A?Brand";v="24"', 'sec-ch-ua-mobile': '?1', 'sec-ch-ua-platform': '"Android"',}) as resp:
                        text = await resp.text()
                        url = re.search(r"(https://.*?playlist.m3u8.*?)\"", text).group(1)

            elif 'videos.classplusapp' in url or "tencdn.classplusapp" in url or "webvideos.classplusapp.com" in url or "media-cdn-alisg.classplusapp.com" in url or "videos.classplusapp" in url or "videos.classplusapp.com" in url or "media-cdn-a.classplusapp" in url or "media-cdn.classplusapp" in url:
                # Exchange the raw CDN URL for a signed JW Player URL.
                url = requests.get(f'https://api.classplusapp.com/cams/uploader/video/jw-signed-url?url={url}', headers={'x-access-token': 'eyJjb3Vyc2VJZCI6IjQ1NjY4NyIsInR1dG9ySWQiOm51bGwsIm9yZ0lkIjo0ODA2MTksImNhdGVnb3J5SWQiOm51bGx9r'}).json()['url']

            elif "apps-s3-jw-prod.utkarshapp.com" in url:
                if 'enc_plain_mp4' in url:
                    # Swap the final path segment for the chosen resolution.
                    url = url.replace(url.split("/")[-1], res+'.mp4')

                elif 'Key-Pair-Id' in url:
                    # Signed URLs can't be rewritten; skip-marker (yt-dlp will fail).
                    url = None

                elif '.m3u8' in url:
                    # Resolve the variant playlist into an absolute URL.
                    q = ((m3u8.loads(requests.get(url).text)).data['playlists'][1]['uri']).split("/")[0]
                    x = url.split("/")[5]
                    x = url.replace(x, "")
                    url = ((m3u8.loads(requests.get(url).text)).data['playlists'][1]['uri']).replace(q+"/", x)

            elif '/master.mpd' in url:
                vid_id = url.split("/")[-2]
                url = f"https://pw-links-api.onrender.com/process?v=https://sec1.pw.live/{vid_id}/master.mpd&quality={raw_text2}"

            # Sanitise the display name for use as a filename.
            name1 = links[i][0].replace("\t", "").replace(":", "").replace("/", "").replace("+", "").replace("#", "").replace("|", "").replace("@", "").replace("*", "").replace(".", "").replace("https", "").replace("http", "").strip()
            name = f'{str(count).zfill(3)}) {name1[:60]} {my_name}'

            if "edge.api.brightcove.com" in url:
                # Replace any existing auth query with a known bcov token.
                bcov = 'bcov_auth=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJpYXQiOjE3MjQyMzg3OTEsImNvbiI6eyJpc0FkbWluIjpmYWxzZSwiYXVzZXIiOiJVMFZ6TkdGU2NuQlZjR3h5TkZwV09FYzBURGxOZHowOSIsImlkIjoiZEUxbmNuZFBNblJqVEROVmFWTlFWbXhRTkhoS2R6MDkiLCJmaXJzdF9uYW1lIjoiYVcxV05ITjVSemR6Vm10ak1WUlBSRkF5ZVNzM1VUMDkiLCJlbWFpbCI6Ik5Ga3hNVWhxUXpRNFJ6VlhiR0ppWTJoUk0wMVdNR0pVTlU5clJXSkRWbXRMTTBSU2FHRnhURTFTUlQwPSIsInBob25lIjoiVUhVMFZrOWFTbmQ1ZVcwd1pqUTViRzVSYVc5aGR6MDkiLCJhdmF0YXIiOiJLM1ZzY1M4elMwcDBRbmxrYms4M1JEbHZla05pVVQwOSIsInJlZmVycmFsX2NvZGUiOiJOalZFYzBkM1IyNTBSM3B3VUZWbVRtbHFRVXAwVVQwOSIsImRldmljZV90eXBlIjoiYW5kcm9pZCIsImRldmljZV92ZXJzaW9uIjoiUShBbmRyb2lkIDEwLjApIiwiZGV2aWNlX21vZGVsIjoiU2Ftc3VuZyBTTS1TOTE4QiIsInJlbW90ZV9hZGRyIjoiNTQuMjI2LjI1NS4xNjMsIDU0LjIyNi4yNTUuMTYzIn19.snDdd-PbaoC42OUhn5SJaEGxq0VzfdzO49WTmYgTx8ra_Lz66GySZykpd2SxIZCnrKR6-R10F5sUSrKATv1CDk9ruj_ltCjEkcRq8mAqAytDcEBp72-W0Z7DtGi8LdnY7Vd9Kpaf499P-y3-godolS_7ixClcYOnWxe2nSVD5C9c5HkyisrHTvf6NFAuQC_FD3TzByldbPVKK0ag1UnHRavX8MtttjshnRhv5gJs5DQWj4Ir_dkMcJ4JaVZO3z8j0OxVLjnmuaRBujT-1pavsr1CCzjTbAcBvdjUfvzEhObWfA1-Vl5Y4bUgRHhl1U-0hne4-5fF0aouyu71Y6W0eg'
                url = url.split("bcov_auth")[0]+bcov

            if '/do' in url:
                # KGS PDF links -> direct akamaized CDN URL.
                pdf_id = url.split("/")[-1].split(".pdf")[0]
                print(pdf_id)
                url = f"https://kgs-v2.akamaized.net/kgs/do/pdfs/{pdf_id}.pdf"

            if 'sec-prod-mediacdn.pw.live' in url:
                vid_id = url.split("sec-prod-mediacdn.pw.live/")[1].split("/")[0]
                url = f"https://pwplayer-0e2dbbdc0989.herokuapp.com/player?url=https://d1d34p8vz63oiq.cloudfront.net/{vid_id}/master.mpd?token={raw_text4}"

            if 'bitgravity.com' in url:
                # Rebuild the path on the akamaized mirror.
                parts = url.split('/')
                part1 = parts[1]
                part2 = parts[2]
                part3 = parts[3]
                part4 = parts[4]
                part5 = parts[5]
                part6 = parts[6]

                print(f"PART1: {part1}")
                print(f"PART2: {part2}")
                print(f"PART3: {part3}")
                print(f"PART4: {part4}")
                print(f"PART5: {part5}")
                print(f"PART6: {part6}")
                url = f"https://kgs-v2.akamaized.net/{part3}/{part4}/{part5}/{part6}"

            if '?list' in url:
                # Drop the playlist suffix from embedded YouTube links.
                video_id = url.split("/embed/")[1].split("?")[0]
                print(video_id)
                url = f"https://www.youtube.com/embed/{video_id}"

            # yt-dlp format selector: force mp4 tracks for YouTube sources.
            if "youtu" in url:
                ytf = f"b[height<={raw_text2}][ext=mp4]/bv[height<={raw_text2}][ext=mp4]+ba[ext=m4a]/b[ext=mp4]"
            else:
                ytf = f"b[height<={raw_text2}]/bv[height<={raw_text2}]+ba/b/bv+ba"

            if "jw-prod" in url:
                cmd = f'yt-dlp -o "{name}.mp4" "{url}"'

            elif "youtube.com" in url or "youtu.be" in url:
                cmd = f'yt-dlp --cookies youtube_cookies.txt -f "{ytf}" "{url}" -o "{name}".mp4'

            else:
                cmd = f'yt-dlp -f "{ytf}" "{url}" -o "{name}.mp4"'

            try:

                # BUG FIX: these captions were single-quoted f-strings broken
                # across physical lines — a SyntaxError. Triple quotes keep
                # the exact content (literal newlines AND \n escapes).
                cc = f'''**🎞️ 𝐕𝐈𝐃_𝐈𝐃: {str(count).zfill(3)}.
\n\n📝 𝐓𝐈𝐓𝐋𝐄:👇🏻
\n{name1} {res} .mkv
\n\n📚 𝐁𝐀𝐓𝐂𝐇 𝐍𝐀𝐌𝐄:👇🏻\n
{b_name}
\n\n✨𝐄𝐗𝐓𝐑𝐀𝐂𝐓𝐄𝐃 𝐁𝐘 : {CR}
**\n\n━━━━━✦𝗦𝗣𝗜𝗗𝗬❤️✦━━━━━
'''
                cc1 = f'''**📁 𝐏𝐃𝐅_𝐈𝐃: {str(count).zfill(3)}.
\n\n📝 𝐓𝐈𝐓𝐋𝐄:👇🏻
\n{name1} .pdf
\n\n📚 𝐁𝐀𝐓𝐂𝐇 𝐍𝐀𝐌𝐄:👇🏻\n
{b_name}
\n\n✨𝐄𝐗𝐓𝐑𝐀𝐂𝐓𝐄𝐃 𝐁𝐘 : {CR}
**\n\n━━━━━✦𝗦𝗣𝗜𝗗𝗬❤️✦━━━━━
'''

                if "drive" in url:
                    try:
                        ka = await helper.download(url, name)
                        copy = await bot.send_document(chat_id=m.chat.id,document=ka, caption=cc1)
                        count+=1
                        os.remove(ka)
                        time.sleep(1)
                    except FloodWait as e:
                        await m.reply_text(str(e))
                        time.sleep(e.x)
                        continue

                elif ".pdf" in url:
                    try:
                        await asyncio.sleep(4)
                        # Replace spaces with %20 in the URL
                        url = url.replace(" ", "%20")

                        # Create a cloudscraper session
                        scraper = cloudscraper.create_scraper()

                        # Send a GET request to download the PDF
                        response = scraper.get(url)

                        # Check if the response status is OK
                        if response.status_code == 200:
                            # Write the PDF content to a file
                            with open(f'{name}.pdf', 'wb') as file:
                                file.write(response.content)

                            # Send the PDF document
                            await asyncio.sleep(4)
                            copy = await bot.send_document(chat_id=m.chat.id, document=f'{name}.pdf', caption=cc1)
                            count += 1

                            # Remove the PDF file after sending
                            os.remove(f'{name}.pdf')
                        else:
                            await m.reply_text(f"Failed to download PDF: {response.status_code} {response.reason}")

                    except FloodWait as e:
                        await m.reply_text(str(e))
                        time.sleep(e.x)
                        continue

                # NOTE: a second, duplicate `elif ".pdf" in url:` yt-dlp branch
                # was removed here — it was unreachable dead code.

                else:
                    Show = f"📥 𝐃𝐨𝐰𝐧𝐥𝐨𝐚𝐝𝐢𝐧𝐠 »\n\n📝 Title:- `{name}\n\n**🔗 𝐓𝐨𝐭𝐚𝐥 𝐔𝐑𝐋 »** ✨{len(links)}✨\n\n⌨ 𝐐𝐮𝐥𝐢𝐭𝐲 » {raw_text2}`\n\n**🔗 𝐔𝐑𝐋 »** `{url}`\n\n**𝐁𝐨𝐭 𝐌𝐚𝐝𝐞 𝐁𝐲 ✦ 🅂🄿🄸🄳🅈"
                    prog = await m.reply_text(Show)
                    res_file = await helper.download_video(url, cmd, name)
                    filename = res_file
                    await prog.delete(True)
                    await helper.send_vid(bot, m, cc, filename, thumb, name, prog)
                    count += 1
                    time.sleep(1)

            except Exception as e:
                # Per-link failures: report and move on to the next entry.
                await m.reply_text(
                    f"⌘ 𝐃𝐨𝐰𝐧𝐥𝐨𝐚𝐝𝐢𝐧𝐠 𝐈𝐧𝐭𝐞𝐫𝐮𝐩𝐭𝐞𝐝\n\n⌘ 𝐍𝐚𝐦𝐞 » {name}\n⌘ 𝐋𝐢𝐧𝐤 » `{url}`"
                )
                continue

    except Exception as e:
        await m.reply_text(e)
    await m.reply_text("🔰Done🔰")
    await m.reply_text("✨Thankyou For Choosing")
1195 |
1196 | # m3u8
1197 |
@bot.on_message(filters.command(["advance"]))
async def txt_handler(bot: Client, m: Message):
    """/advance: bulk downloader with PW-token support for MPD/m3u8 links.

    Same interactive flow as /spidy, plus a PW token prompt; DRM-ish
    master.mpd / master.m3u8 / workers.dev / psitoffers links are routed
    through the madxapi proxy using that token.

    NOTE(review): module-level names `my_name` and `helper` are assumed to be
    defined earlier in this file — confirm before moving this handler.
    """
    editable = await m.reply_text(f"**🔹Send me the TXT file and wait.**")
    input: Message = await bot.listen(editable.chat.id)
    x = await input.download()
    await input.delete(True)
    file_name, ext = os.path.splitext(os.path.basename(x))
    credit = f"𝗦𝗣𝗜𝗗𝗬™🇮🇳"
    # Parse "name://url" pairs, one per line: links[i] == [name, rest-of-url].
    try:
        with open(x, "r") as f:
            content = f.read()
            content = content.split("\n")
            links = []
            for i in content:
                links.append(i.split("://", 1))
        os.remove(x)
    except:
        await m.reply_text("Invalid file input.")
        os.remove(x)
        return

    await editable.edit(f"Total links found are **{len(links)}**\n\nSend From where you want to download initial is **1**")
    input0: Message = await bot.listen(editable.chat.id)
    raw_text = input0.text
    await input0.delete(True)
    try:
        arg = int(raw_text)
    except:
        arg = 1
    await editable.edit("**Enter Your Batch Name or send d for grabing from text filename.**")
    input1: Message = await bot.listen(editable.chat.id)
    raw_text0 = input1.text
    await input1.delete(True)
    if raw_text0 == 'd':
        b_name = file_name
    else:
        b_name = raw_text0

    await editable.edit("**Enter resolution.\n Eg : 480 or 720**")
    input2: Message = await bot.listen(editable.chat.id)
    raw_text2 = input2.text
    await input2.delete(True)
    # Map the requested height to the WxH label used in captions/filenames.
    try:
        if raw_text2 == "144":
            res = "144x256"
        elif raw_text2 == "240":
            res = "240x426"
        elif raw_text2 == "360":
            res = "360x640"
        elif raw_text2 == "480":
            res = "480x854"
        elif raw_text2 == "720":
            res = "720x1280"
        elif raw_text2 == "1080":
            res = "1080x1920"
        else:
            res = "UN"
    except Exception:
        res = "UN"

    await editable.edit("**Enter Your Name or send 'de' for use default.\n Eg : 𝗦𝗣𝗜𝗗𝗬™👨🏻💻**")
    input3: Message = await bot.listen(editable.chat.id)
    raw_text3 = input3.text
    await input3.delete(True)
    if raw_text3 == 'de':
        CR = credit
    else:
        CR = raw_text3

    await editable.edit("**Enter Your PW Token For 𝐌𝐏𝐃 𝐔𝐑𝐋 or send 'unknown' for use default**")
    input4: Message = await bot.listen(editable.chat.id)
    raw_text4 = input4.text
    await input4.delete(True)
    # Simplified: the original if/else assigned the same value in both
    # branches. `MR` is never read afterwards but kept for compatibility.
    MR = raw_text4

    await editable.edit("Now send the **Thumb url**\n**Eg :** ``\n\nor Send `no`")
    input6 = message = await bot.listen(editable.chat.id)
    raw_text6 = input6.text
    await input6.delete(True)
    await editable.delete()

    thumb = input6.text
    if thumb.startswith("http://") or thumb.startswith("https://"):
        getstatusoutput(f"wget '{thumb}' -O 'thumb.jpg'")
        thumb = "thumb.jpg"
    else:
        # BUG FIX: was `thumb == "no"` — a no-op comparison that left any
        # non-URL reply as a bogus thumbnail path. Assign instead.
        thumb = "no"

    count = int(raw_text)
    try:
        for i in range(arg-1, len(links)):

            # Normalise the raw link: Drive share links -> direct download,
            # nocookie embeds -> youtu.be, strip tracking suffixes.
            Vxy = links[i][1].replace("file/d/","uc?export=download&id=").replace("www.youtube-nocookie.com/embed", "youtu.be").replace("?modestbranding=1", "").replace("/view?usp=sharing","")
            url = "https://" + Vxy
            # visionias pages embed the real playlist URL inside the HTML.
            if "visionias" in url:
                async with ClientSession() as session:
                    async with session.get(url, headers={'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Accept-Language': 'en-US,en;q=0.9', 'Cache-Control': 'no-cache', 'Connection': 'keep-alive', 'Pragma': 'no-cache', 'Referer': 'http://www.visionias.in/', 'Sec-Fetch-Dest': 'iframe', 'Sec-Fetch-Mode': 'navigate', 'Sec-Fetch-Site': 'cross-site', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Linux; Android 12; RMX2121) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Mobile Safari/537.36', 'sec-ch-ua': '"Chromium";v="107", "Not=A?Brand";v="24"', 'sec-ch-ua-mobile': '?1', 'sec-ch-ua-platform': '"Android"',}) as resp:
                        text = await resp.text()
                        url = re.search(r"(https://.*?playlist.m3u8.*?)\"", text).group(1)

            # NOTE: a former `if "acecwply" in url:` branch was removed here —
            # it referenced `name` before assignment (NameError on the first
            # iteration) and its `cmd` was always overwritten by the
            # downloader selection below, so it had no effect.

            # Site-specific URL rewrites (mutually exclusive). The repeated
            # visionias fetch is kept because the elif-chain hangs off it.
            if "visionias" in url:
                async with ClientSession() as session:
                    async with session.get(url, headers={'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Accept-Language': 'en-US,en;q=0.9', 'Cache-Control': 'no-cache', 'Connection': 'keep-alive', 'Pragma': 'no-cache', 'Referer': 'http://www.visionias.in/', 'Sec-Fetch-Dest': 'iframe', 'Sec-Fetch-Mode': 'navigate', 'Sec-Fetch-Site': 'cross-site', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Linux; Android 12; RMX2121) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Mobile Safari/537.36', 'sec-ch-ua': '"Chromium";v="107", "Not=A?Brand";v="24"', 'sec-ch-ua-mobile': '?1', 'sec-ch-ua-platform': '"Android"',}) as resp:
                        text = await resp.text()
                        url = re.search(r"(https://.*?playlist.m3u8.*?)\"", text).group(1)

            elif 'videos.classplusapp' in url or "tencdn.classplusapp" in url or "webvideos.classplusapp.com" in url or "media-cdn-alisg.classplusapp.com" in url or "videos.classplusapp" in url or "videos.classplusapp.com" in url or "media-cdn-a.classplusapp" in url or "media-cdn.classplusapp" in url:
                # Exchange the raw CDN URL for a signed JW Player URL.
                url = requests.get(f'https://api.classplusapp.com/cams/uploader/video/jw-signed-url?url={url}', headers={'x-access-token': 'eyJjb3Vyc2VJZCI6IjQ1NjY4NyIsInR1dG9ySWQiOm51bGwsIm9yZ0lkIjo0ODA2MTksImNhdGVnb3J5SWQiOm51bGx9r'}).json()['url']

            elif "apps-s3-jw-prod.utkarshapp.com" in url:
                if 'enc_plain_mp4' in url:
                    # Swap the final path segment for the chosen resolution.
                    url = url.replace(url.split("/")[-1], res+'.mp4')

                elif 'Key-Pair-Id' in url:
                    # Signed URLs can't be rewritten; skip-marker (yt-dlp will fail).
                    url = None

                elif '.m3u8' in url:
                    # Resolve the variant playlist into an absolute URL.
                    q = ((m3u8.loads(requests.get(url).text)).data['playlists'][1]['uri']).split("/")[0]
                    x = url.split("/")[5]
                    x = url.replace(x, "")
                    url = ((m3u8.loads(requests.get(url).text)).data['playlists'][1]['uri']).replace(q+"/", x)

            elif '/master.mpd' in url:
                vid_id = url.split("/")[-2]
                url = f"https://madxapi-d0cbf6ac738c.herokuapp.com/{vid_id}/master.m3u8?token={raw_text4}"

            # Sanitise the display name for use as a filename.
            name1 = links[i][0].replace("\t", "").replace(":", "").replace("/", "").replace("+", "").replace("#", "").replace("|", "").replace("@", "").replace("*", "").replace(".", "").replace("https", "").replace("http", "").strip()
            name = f'{str(count).zfill(3)}) {name1[:60]} {my_name}'

            if "edge.api.brightcove.com" in url:
                # Replace any existing auth query with a known bcov token.
                bcov = 'bcov_auth=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJpYXQiOjE3MjQyMzg3OTEsImNvbiI6eyJpc0FkbWluIjpmYWxzZSwiYXVzZXIiOiJVMFZ6TkdGU2NuQlZjR3h5TkZwV09FYzBURGxOZHowOSIsImlkIjoiZEUxbmNuZFBNblJqVEROVmFWTlFWbXhRTkhoS2R6MDkiLCJmaXJzdF9uYW1lIjoiYVcxV05ITjVSemR6Vm10ak1WUlBSRkF5ZVNzM1VUMDkiLCJlbWFpbCI6Ik5Ga3hNVWhxUXpRNFJ6VlhiR0ppWTJoUk0wMVdNR0pVTlU5clJXSkRWbXRMTTBSU2FHRnhURTFTUlQwPSIsInBob25lIjoiVUhVMFZrOWFTbmQ1ZVcwd1pqUTViRzVSYVc5aGR6MDkiLCJhdmF0YXIiOiJLM1ZzY1M4elMwcDBRbmxrYms4M1JEbHZla05pVVQwOSIsInJlZmVycmFsX2NvZGUiOiJOalZFYzBkM1IyNTBSM3B3VUZWbVRtbHFRVXAwVVQwOSIsImRldmljZV90eXBlIjoiYW5kcm9pZCIsImRldmljZV92ZXJzaW9uIjoiUShBbmRyb2lkIDEwLjApIiwiZGV2aWNlX21vZGVsIjoiU2Ftc3VuZyBTTS1TOTE4QiIsInJlbW90ZV9hZGRyIjoiNTQuMjI2LjI1NS4xNjMsIDU0LjIyNi4yNTUuMTYzIn19.snDdd-PbaoC42OUhn5SJaEGxq0VzfdzO49WTmYgTx8ra_Lz66GySZykpd2SxIZCnrKR6-R10F5sUSrKATv1CDk9ruj_ltCjEkcRq8mAqAytDcEBp72-W0Z7DtGi8LdnY7Vd9Kpaf499P-y3-godolS_7ixClcYOnWxe2nSVD5C9c5HkyisrHTvf6NFAuQC_FD3TzByldbPVKK0ag1UnHRavX8MtttjshnRhv5gJs5DQWj4Ir_dkMcJ4JaVZO3z8j0OxVLjnmuaRBujT-1pavsr1CCzjTbAcBvdjUfvzEhObWfA1-Vl5Y4bUgRHhl1U-0hne4-5fF0aouyu71Y6W0eg'
                url = url.split("bcov_auth")[0]+bcov

            # The next three rewrites all route through the madxapi proxy
            # using the user-supplied PW token.
            if 'workers.dev' in url:
                vid_id = url.split("cloudfront.net/")[1].split("/")[0]
                print(vid_id)
                url = f"https://madxapi-d0cbf6ac738c.herokuapp.com/{vid_id}/master.m3u8?token={raw_text4}"

            if 'psitoffers.store' in url:
                vid_id = url.split("vid=")[1].split("&")[0]
                print(f"vid_id = {vid_id}")
                url = f"https://madxapi-d0cbf6ac738c.herokuapp.com/{vid_id}/master.m3u8?token={raw_text4}"

            # Idempotent for URLs already rewritten above (re-extracts the
            # same vid_id and rebuilds the same proxy URL).
            if '/master.m3u8' in url:
                vid_id = url.split("/")[-2]
                url = f"https://madxapi-d0cbf6ac738c.herokuapp.com/{vid_id}/master.m3u8?token={raw_text4}"

            if 'sec-prod-mediacdn.pw.live' in url:
                vid_id = url.split("sec-prod-mediacdn.pw.live/")[1].split("/")[0]
                url = f"https://pwplayer-0e2dbbdc0989.herokuapp.com/player?url=https://d1d34p8vz63oiq.cloudfront.net/{vid_id}/master.mpd?token={raw_text4}"

            # yt-dlp format selector: force mp4 tracks for YouTube sources.
            if "youtu" in url:
                ytf = f"b[height<={raw_text2}][ext=mp4]/bv[height<={raw_text2}][ext=mp4]+ba[ext=m4a]/b[ext=mp4]"
            else:
                ytf = f"b[height<={raw_text2}]/bv[height<={raw_text2}]+ba/b/bv+ba"

            if "jw-prod" in url:
                cmd = f'yt-dlp -o "{name}.mp4" "{url}"'

            elif "youtube.com" in url or "youtu.be" in url:
                cmd = f'yt-dlp --cookies youtube_cookies.txt -f "{ytf}" "{url}" -o "{name}".mp4'

            else:
                cmd = f'yt-dlp -f "{ytf}" "{url}" -o "{name}.mp4"'

            try:

                cc = f'**🎞️ VID_ID: {str(count).zfill(3)}.\n\n Title: {name1} @Spidy_Universe {res}.mkv\n\n📚 Batch Name: {b_name}\n\n📥 Extracted By : {CR}\n\n**━━━━━✦𝗦𝗣𝗜𝗗𝗬❤️✦━━━━━**'
                cc1 = f'**📁 PDF_ID: {str(count).zfill(3)}.\n\n Title: {name1} @Spidy_Universe.pdf\n\n📚 Batch Name: {b_name}\n\n📥 Extracted By : {CR}\n\n**━━━━━✦𝗦𝗣𝗜𝗗𝗬❤️✦━━━━━**'

                if "drive" in url:
                    try:
                        ka = await helper.download(url, name)
                        copy = await bot.send_document(chat_id=m.chat.id,document=ka, caption=cc1)
                        count+=1
                        os.remove(ka)
                        time.sleep(1)
                    except FloodWait as e:
                        await m.reply_text(str(e))
                        time.sleep(e.x)
                        continue

                elif ".pdf" in url:
                    try:
                        await asyncio.sleep(4)
                        # Replace spaces with %20 in the URL
                        url = url.replace(" ", "%20")

                        # Create a cloudscraper session
                        scraper = cloudscraper.create_scraper()

                        # Send a GET request to download the PDF
                        response = scraper.get(url)

                        # Check if the response status is OK
                        if response.status_code == 200:
                            # Write the PDF content to a file
                            with open(f'{name}.pdf', 'wb') as file:
                                file.write(response.content)

                            # Send the PDF document
                            await asyncio.sleep(4)
                            copy = await bot.send_document(chat_id=m.chat.id, document=f'{name}.pdf', caption=cc1)
                            count += 1

                            # Remove the PDF file after sending
                            os.remove(f'{name}.pdf')
                        else:
                            await m.reply_text(f"Failed to download PDF: {response.status_code} {response.reason}")

                    except FloodWait as e:
                        await m.reply_text(str(e))
                        time.sleep(e.x)
                        continue

                # NOTE: a second, duplicate `elif ".pdf" in url:` yt-dlp branch
                # was removed here — it was unreachable dead code.

                else:
                    Show = f"📥 𝐃𝐨𝐰𝐧𝐥𝐨𝐚𝐝𝐢𝐧𝐠 »\n\n📝 Title:- `{name}\n\n**🔗 𝐓𝐨𝐭𝐚𝐥 𝐔𝐑𝐋 »** ✨{len(links)}✨\n\n⌨ 𝐐𝐮𝐥𝐢𝐭𝐲 » {raw_text2}`\n\n**🔗 𝐔𝐑𝐋 »** `{url}`\n\n**𝐁𝐨𝐭 𝐌𝐚𝐝𝐞 𝐁𝐲 ✦ 🅂🄿🄸🄳🅈"
                    prog = await m.reply_text(Show)
                    res_file = await helper.download_video(url, cmd, name)
                    filename = res_file
                    await prog.delete(True)
                    await helper.send_vid(bot, m, cc, filename, thumb, name, prog)
                    count += 1
                    time.sleep(1)

            except Exception as e:
                # Per-link failures: report and move on to the next entry.
                await m.reply_text(
                    f"⌘ 𝐃𝐨𝐰𝐧𝐥𝐨𝐚𝐝𝐢𝐧𝐠 𝐈𝐧𝐭𝐞𝐫𝐮𝐩𝐭𝐞𝐝\n\n⌘ 𝐍𝐚𝐦𝐞 » {name}\n⌘ 𝐋𝐢𝐧𝐤 » `{url}`"
                )
                continue

    except Exception as e:
        await m.reply_text(e)
    await m.reply_text("🔰Done🔰")
    await m.reply_text("✨Thankyou For Choosing")
1456 |
1457 |
@bot.on_message(filters.command(["alpha"]))
async def txt_handler(bot: Client, m: Message):
    """Batch-download every entry of a user-supplied TXT file and upload it.

    Each line of the file must look like ``Title:https://...``.  The handler
    interactively collects: the TXT file, the start index, a batch name, the
    target resolution, a credit name and a thumbnail; it then loops over the
    links, rewriting provider-specific URLs (visionias, classplus, utkarsh,
    pw.live, kgs/bitgravity, youtube, ...) and sends each item to the chat as
    a video or a document.
    """
    editable = await m.reply_text("**🔹Send me the TXT file and wait.**")
    # `input` shadowed the builtin in the original; renamed for clarity.
    doc_msg: Message = await bot.listen(editable.chat.id)
    x = await doc_msg.download()
    await doc_msg.delete(True)
    file_name, ext = os.path.splitext(os.path.basename(x))
    credit = f"𝗦𝗣𝗜𝗗𝗬™🇮🇳"
    try:
        with open(x, "r") as f:
            content = f.read()
        # Keep only lines that really contain a URL: the trailing newline (and
        # any stray text line) used to produce 1-element entries that raised
        # IndexError deep inside the download loop and aborted the whole batch.
        links = [line.split("://", 1) for line in content.split("\n") if "://" in line]
        os.remove(x)
    except Exception:
        await m.reply_text("Invalid file input.")
        if os.path.exists(x):
            os.remove(x)
        return

    await editable.edit(f"Total links found are **{len(links)}**\n\nSend From where you want to download initial is **1**")
    input0: Message = await bot.listen(editable.chat.id)
    raw_text = input0.text
    await input0.delete(True)
    try:
        arg = int(raw_text)
    except (TypeError, ValueError):
        arg = 1  # default: start from the first link

    await editable.edit("**Enter Your Batch Name or send d for grabing from text filename.**")
    input1: Message = await bot.listen(editable.chat.id)
    raw_text0 = input1.text
    await input1.delete(True)
    b_name = file_name if raw_text0 == 'd' else raw_text0

    await editable.edit("**Enter resolution.\n Eg : 480 or 720**")
    input2: Message = await bot.listen(editable.chat.id)
    raw_text2 = input2.text
    await input2.delete(True)
    # Map the requested height to a WxH string; anything unrecognised is "UN".
    res = {
        "144": "144x256",
        "240": "240x426",
        "360": "360x640",
        "480": "480x854",
        "720": "720x1280",
        "1080": "1080x1920",
    }.get(raw_text2, "UN")

    await editable.edit("**Enter Your Name or send 'de' for use default.\n Eg : 𝗦𝗣𝗜𝗗𝗬™👨🏻‍💻**")
    input3: Message = await bot.listen(editable.chat.id)
    raw_text3 = input3.text
    await input3.delete(True)
    CR = credit if raw_text3 == 'de' else raw_text3

    await editable.edit("Now send the **Thumb url**\n**Eg :** ``\n\nor Send `no`")
    input6 = await bot.listen(editable.chat.id)
    thumb = input6.text
    await input6.delete(True)
    await editable.delete()

    if thumb.startswith(("http://", "https://")):
        # Fetch the thumbnail directly instead of interpolating the untrusted
        # URL into a wget shell command (shell-injection risk in the original).
        try:
            with open("thumb.jpg", "wb") as fh:
                fh.write(requests.get(thumb).content)
            thumb = "thumb.jpg"
        except Exception:
            thumb = "no"
    else:
        # BUG FIX: the original wrote `thumb == "no"` (a comparison, not an
        # assignment), so any non-URL reply leaked through as the thumb path.
        thumb = "no"

    # BUG FIX: the original re-parsed `int(raw_text)` here and crashed on
    # non-numeric input even though `arg` already holds the validated index.
    count = arg
    # TODO(review): `raw_text4` (pw.live token) is never collected in this
    # handler; defaulting to "" avoids the guaranteed NameError that aborted
    # the whole batch — confirm where the token should actually come from.
    raw_text4 = ""

    visionias_headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9', 'Accept-Language': 'en-US,en;q=0.9', 'Cache-Control': 'no-cache', 'Connection': 'keep-alive', 'Pragma': 'no-cache', 'Referer': 'http://www.visionias.in/', 'Sec-Fetch-Dest': 'iframe', 'Sec-Fetch-Mode': 'navigate', 'Sec-Fetch-Site': 'cross-site', 'Upgrade-Insecure-Requests': '1', 'User-Agent': 'Mozilla/5.0 (Linux; Android 12; RMX2121) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Mobile Safari/537.36', 'sec-ch-ua': '"Chromium";v="107", "Not=A?Brand";v="24"', 'sec-ch-ua-mobile': '?1', 'sec-ch-ua-platform': '"Android"',}
    # Exact-duplicate host checks from the original condition were removed;
    # the matching semantics ("substring of url") are unchanged.
    classplus_hosts = ('videos.classplusapp', "tencdn.classplusapp", "webvideos.classplusapp.com", "media-cdn-alisg.classplusapp.com", "media-cdn-a.classplusapp", "media-cdn.classplusapp")

    try:
        for i in range(arg - 1, len(links)):
            # Build the display name first: the original referenced `name`
            # inside the acecwply branch before it was ever assigned.
            name1 = links[i][0].replace("\t", "").replace(":", "").replace("/", "").replace("+", "").replace("#", "").replace("|", "").replace("@", "").replace("*", "").replace(".", "").replace("https", "").replace("http", "").strip()
            name = f'{str(count).zfill(3)}) {name1[:60]} {my_name}'

            Vxy = links[i][1].replace("file/d/", "uc?export=download&id=").replace("www.youtube-nocookie.com/embed", "youtu.be").replace("?modestbranding=1", "").replace("/view?usp=sharing", "")
            url = "https://" + Vxy

            if "visionias" in url:
                # Resolve the embedded HLS playlist URL.  The original ran
                # this identical fetch twice in a row; once is enough.
                async with ClientSession() as session:
                    async with session.get(url, headers=visionias_headers) as resp:
                        text = await resp.text()
                        url = re.search(r"(https://.*?playlist.m3u8.*?)\"", text).group(1)

            elif any(host in url for host in classplus_hosts):
                # Exchange the raw CDN link for a signed JW URL.
                url = requests.get(f'https://api.classplusapp.com/cams/uploader/video/jw-signed-url?url={url}', headers={'x-access-token': 'eyJjb3Vyc2VJZCI6IjQ1NjY4NyIsInR1dG9ySWQiOm51bGwsIm9yZ0lkIjo0ODA2MTksImNhdGVnb3J5SWQiOm51bGx9r'}).json()['url']

            elif "apps-s3-jw-prod.utkarshapp.com" in url:
                if 'enc_plain_mp4' in url:
                    # Swap the final path segment for the chosen resolution.
                    url = url.replace(url.split("/")[-1], res + '.mp4')
                elif 'Key-Pair-Id' in url:
                    url = None  # DRM-signed link: cannot be downloaded
                elif '.m3u8' in url:
                    # Pick the second variant playlist and rebase its relative
                    # URI onto the original host path.
                    q = ((m3u8.loads(requests.get(url).text)).data['playlists'][1]['uri']).split("/")[0]
                    base = url.split("/")[5]
                    base = url.replace(base, "")
                    url = ((m3u8.loads(requests.get(url).text)).data['playlists'][1]['uri']).replace(q + "/", base)

            elif '/master.mpd' in url:
                vid_id = url.split("/")[-2]
                url = f"https://pw-links-api.onrender.com/process?v=https://sec1.pw.live/{vid_id}/master.mpd&quality={raw_text2}"

            if url is None:
                # BUG FIX: the original fell through to `'/do' in url` with
                # url=None (TypeError) and killed the whole batch instead of
                # skipping the single undownloadable entry.
                await m.reply_text(
                    f"⌘ 𝐃𝐨𝐰𝐧𝐥𝐨𝐚𝐝𝐢𝐧𝐠 𝐈𝐧𝐭𝐞𝐫𝐮𝐩𝐭𝐞𝐝\n\n⌘ 𝐍𝐚𝐦𝐞 » {name}\n⌘ 𝐋𝐢𝐧𝐤 » `{url}`"
                )
                continue

            if '/do' in url:
                # Rewrite kgs "do" links to the direct akamaized PDF URL.
                pdf_id = url.split("/")[-1].split(".pdf")[0]
                url = f"https://kgs-v2.akamaized.net/kgs/do/pdfs/{pdf_id}.pdf"

            if 'bitgravity.com' in url:
                # Rehost bitgravity links on the akamaized CDN (path parts 3-6).
                parts = url.split('/')
                url = f"https://kgs-v2.akamaized.net/{parts[3]}/{parts[4]}/{parts[5]}/{parts[6]}"

            if '?list' in url and '/embed/' in url:
                # Strip playlist parameters from embedded YouTube links.  The
                # '/embed/' guard prevents an IndexError on malformed links.
                video_id = url.split("/embed/")[1].split("?")[0]
                url = f"https://www.youtube.com/embed/{video_id}"

            if 'sec-prod-mediacdn.pw.live' in url:
                vid_id = url.split("sec-prod-mediacdn.pw.live/")[1].split("/")[0]
                url = f"https://pwplayer-0e2dbbdc0989.herokuapp.com/player?url=https://d1d34p8vz63oiq.cloudfront.net/{vid_id}/master.mpd?token={raw_text4}"

            # yt-dlp format selector: constrain height, prefer mp4 for YouTube.
            if "youtu" in url:
                ytf = f"b[height<={raw_text2}][ext=mp4]/bv[height<={raw_text2}][ext=mp4]+ba[ext=m4a]/b[ext=mp4]"
            else:
                ytf = f"b[height<={raw_text2}]/bv[height<={raw_text2}]+ba/b/bv+ba"

            # NOTE: the original also built a `cmd` for "acecwply" URLs, but it
            # was always overwritten by this chain (and crashed with a
            # NameError on the first iteration) — removed as dead code.
            if "jw-prod" in url:
                cmd = f'yt-dlp -o "{name}.mp4" "{url}"'
            elif "youtube.com" in url or "youtu.be" in url:
                cmd = f'yt-dlp --cookies youtube_cookies.txt -f "{ytf}" "{url}" -o "{name}".mp4'
            else:
                cmd = f'yt-dlp -f "{ytf}" "{url}" -o "{name}.mp4"'

            try:
                cc = f'**🎞️ VID_ID: {str(count).zfill(3)}.\n\n📝 Title: {name1} @Spidy_Universe {res}.mkv\n\n📚 Batch Name: {b_name}\n\n📥 Extracted By : {CR}\n\n**━━━━━✦𝗦𝗣𝗜𝗗𝗬❤️✦━━━━━**'
                cc1 = f'**📁 PDF_ID: {str(count).zfill(3)}.\n\n📝 Title: {name1} @Spidy_Universe.pdf\n\n📚 Batch Name: {b_name}\n\n📥 Extracted By : {CR}\n\n**━━━━━✦𝗦𝗣𝗜𝗗𝗬❤️✦━━━━━**'

                if "drive" in url:
                    try:
                        ka = await helper.download(url, name)
                        await bot.send_document(chat_id=m.chat.id, document=ka, caption=cc1)
                        count += 1
                        os.remove(ka)
                        await asyncio.sleep(1)
                    except FloodWait as e:
                        await m.reply_text(str(e))
                        # Respect the flood wait without blocking the event
                        # loop (the original used time.sleep in async code).
                        await asyncio.sleep(e.x)
                        continue

                elif ".pdf" in url:
                    try:
                        await asyncio.sleep(4)
                        url = url.replace(" ", "%20")  # percent-encode spaces
                        # cloudscraper bypasses Cloudflare's anti-bot page.
                        scraper = cloudscraper.create_scraper()
                        response = scraper.get(url)
                        if response.status_code == 200:
                            with open(f'{name}.pdf', 'wb') as file:
                                file.write(response.content)
                            await asyncio.sleep(4)
                            await bot.send_document(chat_id=m.chat.id, document=f'{name}.pdf', caption=cc1)
                            count += 1
                            os.remove(f'{name}.pdf')
                        else:
                            await m.reply_text(f"Failed to download PDF: {response.status_code} {response.reason}")
                    except FloodWait as e:
                        await m.reply_text(str(e))
                        await asyncio.sleep(e.x)
                        continue

                # NOTE: a second, byte-identical `elif ".pdf" in url:` branch
                # followed here in the original; it was unreachable dead code
                # and has been removed.

                else:
                    Show = f"📥 𝐃𝐨𝐰𝐧𝐥𝐨𝐚𝐝𝐢𝐧𝐠 »\n\n📝 Title:- `{name}\n\n**🔗 𝐓𝐨𝐭𝐚𝐥 𝐔𝐑𝐋 »** ✨{len(links)}✨\n\n⌨ 𝐐𝐮𝐥𝐢𝐭𝐲 » {raw_text2}`\n\n**🔗 𝐔𝐑𝐋 »** `{url}`\n\n**𝐁𝐨𝐭 𝐌𝐚𝐝𝐞 𝐁𝐲 ✦ 🅂🄿🄸🄳🅈"
                    prog = await m.reply_text(Show)
                    res_file = await helper.download_video(url, cmd, name)
                    filename = res_file
                    await prog.delete(True)
                    await helper.send_vid(bot, m, cc, filename, thumb, name, prog)
                    count += 1
                    await asyncio.sleep(1)

            except Exception:
                # Per-item failure: report it and move on to the next link.
                await m.reply_text(
                    f"⌘ 𝐃𝐨𝐰𝐧𝐥𝐨𝐚𝐝𝐢𝐧𝐠 𝐈𝐧𝐭𝐞𝐫𝐮𝐩𝐭𝐞𝐝\n\n⌘ 𝐍𝐚𝐦𝐞 » {name}\n⌘ 𝐋𝐢𝐧𝐤 » `{url}`"
                )
                continue

    except Exception as e:
        # BUG FIX: reply_text expects a string, not an exception object.
        await m.reply_text(str(e))
    await m.reply_text("🔰Done🔰")
    await m.reply_text("✨Thankyou For Choosing")
1717 |
if __name__ == "__main__":
    # BUG FIX: the original called bot.run() unconditionally at import time,
    # which both started the client on import and made this guard unreachable
    # until the client had already stopped.
    bot.run()  # blocks until the Pyrogram client is stopped
    # NOTE(review): `main` is not defined anywhere in this chunk, and this
    # line only runs after bot.run() returns (i.e. after shutdown) — confirm
    # it is intentional.
    asyncio.run(main())
1721 |
1722 |
1723 |
--------------------------------------------------------------------------------