{the_real_download_location} in {ms} seconds"
79 | )
80 | else:
81 | await mess_age.edit_text("😔 Download Cancelled or some error happened")
82 | return None, mess_age
83 | return the_real_download_location, mess_age
--------------------------------------------------------------------------------
/tobrot/helper_funcs/extract_link_from_message.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # (c) Shrimadhav U K
4 |
5 | import logging
6 |
7 | import aiohttp
8 | from pyrogram.types import MessageEntity
9 | from tobrot import TG_OFFENSIVE_API, LOGGER
10 |
11 |
def extract_url_from_entity(entities: MessageEntity, text: str):
    """Scan message entities and return the last URL found, or None.

    A ``text_link`` entity carries the URL directly; a plain ``url``
    entity is sliced out of *text* via its offset/length.
    """
    found = None
    for ent in entities:
        if ent.type == "url":
            start = ent.offset
            found = text[start : start + ent.length]
        elif ent.type == "text_link":
            found = ent.url
    return found
22 |
23 |
def _parse_pipe_parts(raw_text):
    """Parse the 'url | filename' or 'url | filename | user | password' form.

    Returns a ``(url, custom_file_name, username, password)`` tuple.  All
    elements are ``None`` when the part count is neither 2 nor 4, mirroring
    the original inline logic this helper replaces.
    """
    url = None
    custom_file_name = None
    youtube_dl_username = None
    youtube_dl_password = None
    url_parts = raw_text.split("|")
    if len(url_parts) == 2:
        url, custom_file_name = url_parts
    elif len(url_parts) == 4:
        (url, custom_file_name, youtube_dl_username, youtube_dl_password) = url_parts
    return url, custom_file_name, youtube_dl_username, youtube_dl_password


async def extract_link(message, type_o_request):
    """Extract a downloadable link (or local .torrent path) from a message.

    Handles magnet links, 'url | name [| user | pass]' text/captions,
    entity-embedded links, and attached .torrent documents.  Optionally
    filters the result through TG_OFFENSIVE_API when configured.

    Returns (url, custom_file_name, youtube_dl_username, youtube_dl_password),
    any of which may be None.
    """
    custom_file_name = None
    url = None
    youtube_dl_username = None
    youtube_dl_password = None

    if message is None:
        url = None
        custom_file_name = None

    elif message.text is not None:
        if message.text.lower().startswith("magnet:"):
            url = message.text.strip()

        elif "|" in message.text:
            # dedicated helper parses the pipe-separated convention
            (
                url,
                custom_file_name,
                youtube_dl_username,
                youtube_dl_password,
            ) = _parse_pipe_parts(message.text)

        elif message.entities is not None:
            url = extract_url_from_entity(message.entities, message.text)

        else:
            url = message.text.strip()

    elif message.document is not None:
        # only .torrent attachments are accepted; download to local disk
        if message.document.file_name.lower().endswith(".torrent"):
            url = await message.download()
            custom_file_name = message.caption

    elif message.caption is not None:
        if "|" in message.caption:
            (
                url,
                custom_file_name,
                youtube_dl_username,
                youtube_dl_password,
            ) = _parse_pipe_parts(message.caption)

        elif message.caption_entities is not None:
            url = extract_url_from_entity(message.caption_entities, message.caption)

        else:
            url = message.caption.strip()

    elif message.entities is not None:
        # NOTE(review): message.text is always None when this branch is
        # reached (earlier elif), so url stays None — looks like dead code;
        # confirm intent before removing.
        url = message.text

    # trim blank spaces from the extracted values
    # might have some issues with #45
    if url is not None:
        url = url.strip()
    if custom_file_name is not None:
        custom_file_name = custom_file_name.strip()
    # https://stackoverflow.com/a/761825/4723940
    if youtube_dl_username is not None:
        youtube_dl_username = youtube_dl_username.strip()
    if youtube_dl_password is not None:
        youtube_dl_password = youtube_dl_password.strip()

    # additional conditional check,
    # here to FILTER out BAD URLs
    LOGGER.info(TG_OFFENSIVE_API)
    if TG_OFFENSIVE_API is not None:
        try:
            async with aiohttp.ClientSession() as session:
                api_url = TG_OFFENSIVE_API.format(
                    i=url, m=custom_file_name, t=type_o_request
                )
                LOGGER.info(api_url)
                async with session.get(api_url) as resp:
                    suats = int(resp.status)
                    err = await resp.text()
                    # non-200 means the URL was rejected by the filter;
                    # surface the API's response text as the "file name"
                    if suats != 200:
                        url = None
                        custom_file_name = err
        except Exception as error:
            # best effort: a bad/unreachable filter API must not break
            # extraction (previously a silent bare except) — log and go on
            LOGGER.info(error)

    return url, custom_file_name, youtube_dl_username, youtube_dl_password
115 |
--------------------------------------------------------------------------------
/tobrot/plugins/new_join_fn.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | import pyrogram
4 | from tobrot import *
5 |
6 | from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton
7 |
8 |
async def new_join_f(client, message):
    """Refuse to serve non-private chats: reply, leave, and clean up."""
    # nothing to do for private chats
    if message.chat.type == "private":
        return
    await message.reply_text(
        f"""🙋🏻♂️ Hello dear!\n\n This Is A Leech Bot .This Chat Is Not Supposed To Use Me\n\nCurrent CHAT ID: {message.chat.id}""",
        parse_mode="html",
        reply_markup=InlineKeyboardMarkup(
            [[InlineKeyboardButton('Channel', url='https://t.me/MaxxBots')]]
        ),
    )
    # leave chat
    await client.leave_chat(chat_id=message.chat.id, delete=True)
    # delete all other messages, except for AUTH_CHANNEL
    await message.delete(revoke=True)
27 |
28 |
async def help_message_f(client, message):
    """Send the full command reference, showing the current upload mode."""
    # label reflects the module-level UPLOAD_AS_DOC flag
    utxt = "Document" if UPLOAD_AS_DOC else "Streamable"
    await message.reply_text(
        f"""Available Commands
/{RCLONE_COMMAND} : This will change your drive config on fly.(First one will be default)

/{CLONE_COMMAND_G}: This command is used to clone gdrive files or folder using gclone.
Syntax:- `[ID of the file or folder][one space][name of your folder only(If the id is of file, don't put anything)]` and then reply /gclone to it.

/{LOG_COMMAND}: This will send you a txt file of the logs.

/{YTDL_COMMAND}: This command should be used as reply to a supported link

/{PYTDL_COMMAND}: This command will download videos from youtube playlist link and will upload to telegram.

/{GYTDL_COMMAND}: This will download and upload to your cloud.

/{GPYTDL_COMMAND}: This download youtube playlist and upload to your cloud.

/{LEECH_COMMAND}: This command should be used as reply to a magnetic link, a torrent link, or a direct link. [this command will SPAM the chat and send the downloads a seperate files, if there is more than one file, in the specified torrent]

/{LEECH_ZIP_COMMAND}: This command should be used as reply to a magnetic link, a torrent link, or a direct link. [This command will create a .tar.gz file of the output directory, and send the files in the chat, splited into PARTS of 1024MiB each, due to Telegram limitations]

/{GLEECH_COMMAND}: This command should be used as reply to a magnetic link, a torrent link, or a direct link. And this will download the files from the given link or torrent and will upload to the cloud using rclone.

/{GLEECH_ZIP_COMMAND} This command will compress the folder/file and will upload to your cloud.

/{LEECH_UNZIP_COMMAND}: This will unarchive file and upload to telegram.

/{GLEECH_UNZIP_COMMAND}: This will unarchive file and upload to cloud.

/{TELEGRAM_LEECH_COMMAND}: This will mirror the telegram files to ur respective cloud .

/{TELEGRAM_LEECH_UNZIP_COMMAND}: This will unarchive telegram file and upload to cloud.

/{GET_SIZE_G}: This will give you total size of your destination folder in cloud.

/{RENEWME_COMMAND}: This will clear the remains of downloads which are not getting deleted after upload of the file or after /cancel command.

/{CANCEL_COMMAND_G} [GID]: To cancel ur download

/{RENAME_COMMAND}: To rename the telegram files.

Only work with direct link and youtube link for nowIt is like u can add custom name as prefix of the original file name. Like if your file name is gk.txt uploaded will be what u add in CUSTOM_FILE_NAME + gk.txt

Only works with direct link/youtube link.No magnet or torrent.

And also added custom name like...

You have to pass link as www.download.me/gk.txt | new.txt

the file will be uploaded as new.txt.

/{SAVE_THUMBNAIL}: Reply To A Photo To Save As Custom Thumbnail

/{CLEAR_THUMBNAIL}: To Clear Saved Custom Thumbnail

/{TOGGLE_VID}: To Upload Your Files As Streamable

/{TOGGLE_DOC}: To Upload Your Files As Documents

**How to Use....?**
__Send any one of the available command, as a reply to a valid link/magnet/torrent. 👊__

**Current Custom Upload Mode:** `{utxt}`

""",
        disable_web_page_preview=True,
    )
101 |
--------------------------------------------------------------------------------
/app.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "Telegram Torrent Leecher",
3 | "description": "A Telegram Torrent (and youtube-dl) Leecher based on Pyrogram. Powered by @MaxxBots",
4 | "logo": "https://telegra.ph/file/101a7d52e02772152c6e3.jpg",
5 | "keywords": [
6 | "telegram"
7 | ],
8 | "repository": "https://github.com/MaxxRider/Leech-Pro",
9 | "success_url": "https://t.me/MaxxBotChat",
10 | "website": "https://github.com/MaxxRider/Leech-Pro",
11 | "env": {
12 | "ENV": {
13 | "description": "Setting this to ANYTHING will enable webhooks when in env mode",
14 | "value": "ANYTHING"
15 | },
16 | "APP_ID": {
17 | "description": "Get this value from https://my.telegram.org",
18 | "value": ""
19 | },
20 | "API_HASH": {
21 | "description": "Get this value from https://my.telegram.org",
22 | "value": ""
23 | },
24 | "TG_BOT_TOKEN": {
25 | "description": "get this value from @BotFather",
26 | "value": ""
27 | },
28 | "AUTH_CHANNEL": {
      "description": "should be an integer. The BOT API ID of the Telegram Group where the Leecher should work. Put the Group ID.",
30 | "value": ""
31 | },
32 | "OWNER_ID": {
33 | "description": "should be an integer. ID of owner of bot",
34 | "value": ""
35 | },
36 | "UPLOAD_AS_DOC": {
37 | "description": "True/False. If true all files will be uploaded as documents. Default is False.",
38 | "required": false
39 | },
40 | "CHUNK_SIZE": {
41 | "description": "should be an integer",
42 | "value": "128",
43 | "required": false
44 | },
45 | "ARIA_TWO_STARTED_PORT": {
46 | "description": "should be an integer. The port on which aria2c daemon must start, and keep listening.",
47 | "value": "6800",
48 | "required": false
49 | },
50 | "EDIT_SLEEP_TIME_OUT": {
51 | "description": "should be an integer. Number of seconds to wait before editing a message.",
52 | "value": "15",
53 | "required": false
54 | },
55 | "MAX_TIME_TO_WAIT_FOR_TORRENTS_TO_START": {
56 | "description": "should be an integer. Number of seconds to wait before cancelling a torrent.",
57 | "required": false
58 | },
59 | "FINISHED_PROGRESS_STR": {
60 | "description": "should be a single character.",
61 | "required": false
62 | },
63 | "UN_FINISHED_PROGRESS_STR": {
64 | "description": "should be a single character.",
65 | "required": false
66 | },
67 | "TG_OFFENSIVE_API": {
      "description": "should be a URL accepting the form params {i}, {m}, and {t}",
69 | "required": false
70 | },
71 | "LEECH_COMMAND": {
72 | "description": "Enter your custom leech command like /leech@botname and so on. Default is /leech",
73 | "required": false
74 | },
75 | "SAVE_THUMBNAIL": {
76 | "description": "For custom thumbnail Command. default is /savethumbnail",
77 | "required": false
78 | },
79 | "CLEAR_THUMBNAIL": {
80 | "description": "For Delete Thumbnail Command. Default is /clearthumbnail",
81 | "required": false
82 | },
83 | "INDEX_LINK": {
84 | "description": "Enter your index link:",
85 | "required": false
86 | },
87 | "GLEECH_COMMAND": {
88 | "description": "Enter your custom gleech command like /gleech1@urgroupname and so on. Default is /gleech",
89 | "required": false
90 | },
91 | "TELEGRAM_LEECH_COMMAND_G": {
92 | "description": "Enter your custom tleech command like /tleech1@urgroupname and so on. Default is /tleech",
93 | "required": false
94 | },
95 | "YTDL_COMMAND": {
96 | "description": "Enter your custom ytdl command like ytdl1@urgroupname and so on. Default is /ytdl.",
97 | "required": false
98 | },
99 | "PYTDL_COMMAND_G": {
100 | "description": "Enter your custom pytdl command like pytdl1@urgroupname and so on. Default is /pytdl.",
101 | "required": false
102 | },
103 | "CANCEL_COMMAND_G": {
104 | "description": "Enter your custom cancel command like cancel@urgroupname and so on. Default is /cancel.",
105 | "required": false
106 | },
107 | "GET_SIZE_G": {
108 | "description": "Enter your custom getsize command like getsize@urgroupname and so on. Default is /getsize.",
109 | "required": false
110 | },
111 | "RCLONE_CONFIG": {
112 | "description": "Enter your copied text from rclone config. Compulsory for /gleech as well as /tleech command ",
113 | "required": false
114 | },
115 | "DESTINATION_FOLDER": {
      "description": "Enter your Cloud folder NAME (not ID 😅) in which you want to upload/store your files.",
117 | "required": false
118 | },
119 | "CUSTOM_FILE_NAME": {
120 | "description": "fill with name u want to prefix the file name like ur channel username🙊, keep empty for do nothing, but add to ur config vars even without input.",
121 | "required": false
122 | }
123 | },
124 | "stack": "container"
125 | }
126 |
--------------------------------------------------------------------------------
/tobrot/helper_funcs/display_progress.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # (c) Shrimadhav U K | gautamajay52 | MaxxRider
4 |
5 | import logging
6 | import math
7 | import os
8 | import time
9 |
10 | from pyrogram.errors.exceptions import FloodWait
11 | from tobrot import (
12 | EDIT_SLEEP_TIME_OUT,
13 | FINISHED_PROGRESS_STR,
14 | UN_FINISHED_PROGRESS_STR,
15 | gDict,
16 | LOGGER,
17 | )
18 | from pyrogram import Client
19 |
# NOTE(review): configuring the root logger at import time is a module-level
# side effect, and DEBUG is very verbose — confirm this is intentional.
logging.basicConfig(
    level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
24 |
25 | from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, Message
26 |
27 |
28 |
class Progress:
    """Progress reporter for pyrogram uploads/downloads.

    Edits a status message with a progress bar and supports cancellation
    through the global ``gDict`` registry (populated by the callback-button
    handler).
    """

    def __init__(self, from_user, client, mess: Message):
        self._from_user = from_user  # user id allowed to cancel this transfer
        self._client = client
        self._mess = mess            # status message being edited in place
        self._cancelled = False

    @property
    def is_cancelled(self):
        """True once this transfer's message id appears in gDict for the chat."""
        chat_id = self._mess.chat.id
        mes_id = self._mess.message_id
        # NOTE(review): assumes gDict[chat_id] always resolves (e.g. a
        # defaultdict) to a container of cancelled message ids — confirm.
        if gDict[chat_id] and mes_id in gDict[chat_id]:
            self._cancelled = True
        return self._cancelled

    async def progress_for_pyrogram(self, current, total, ud_type, start):
        """Pyrogram progress callback: render bar, speed and ETA.

        Edits the status message at most every EDIT_SLEEP_TIME_OUT seconds
        (and on completion).  Raises pyrogram's StopTransmission via
        stop_transmission() when the transfer was cancelled.
        """
        chat_id = self._mess.chat.id
        mes_id = self._mess.message_id
        from_user = self._from_user
        now = time.time()
        diff = now - start
        if not diff:
            # FIX: guard against ZeroDivisionError on the very first callback
            diff = 1e-9
        reply_markup = InlineKeyboardMarkup(
            [
                [
                    InlineKeyboardButton(
                        "Cancel 🚫",
                        callback_data=(
                            f"gUPcancel/{chat_id}/{mes_id}/{from_user}"
                        ).encode("UTF-8"),
                    )
                ]
            ]
        )
        if self.is_cancelled:
            LOGGER.info("stopping ")
            await self._mess.edit(
                f"😔 Cancelled/ERROR: `{ud_type}` ({humanbytes(total)})"
            )
            # raises StopTransmission inside pyrogram, aborting the transfer
            await self._client.stop_transmission()

        # throttle edits: only near EDIT_SLEEP_TIME_OUT boundaries or at the end
        if round(diff % float(EDIT_SLEEP_TIME_OUT)) == 0 or current == total:
            percentage = current * 100 / total
            speed = current / diff
            time_to_completion = round((total - current) / speed) * 1000
            estimated_total_time = TimeFormatter(milliseconds=time_to_completion)

            progress = "\n{0}{1} {2}%\n".format(
                ''.join([FINISHED_PROGRESS_STR for i in range(math.floor(percentage / 5))]),
                ''.join([UN_FINISHED_PROGRESS_STR for i in range(20 - math.floor(percentage / 5))]),
                round(percentage, 2))
            tmp = progress + "\n**• Total 📀:**`〘{1}〙`\n**• Done ✓ :**` 〘{0}〙`\n**• Speed 🚀 :** `〘{2}〙`\n**• ETA ⏳ :**` 〘{3}〙`".format(
                humanbytes(current),
                humanbytes(total),
                humanbytes(speed),
                estimated_total_time if estimated_total_time != "" else "0 s",
            )
            try:
                if not self._mess.photo:
                    await self._mess.edit_text(
                        text="{}\n {}".format(ud_type, tmp), reply_markup=reply_markup
                    )
                else:
                    await self._mess.edit_caption(
                        caption="{}\n {}".format(ud_type, tmp)
                    )
            except FloodWait as fd:
                logger.warning(f"{fd}")
                # FIX: time.sleep() blocked the whole event loop inside a
                # coroutine; wait asynchronously instead.
                import asyncio  # local import: module top level has no asyncio
                await asyncio.sleep(fd.x)
            except Exception as ou:
                logger.info(ou)
107 |
108 |
def humanbytes(size):
    """Render a byte count with binary-prefix units (KiB, MiB, ...).

    Falsy input (0, None, ...) yields an empty string.
    https://stackoverflow.com/a/49361727/4723940
    """
    if not size:
        return ""
    units = {0: " ", 1: "Ki", 2: "Mi", 3: "Gi", 4: "Ti"}
    exponent = 0
    # divide down by 1024 (2**10) until the value fits the unit
    while size > 1024:
        size /= 1024
        exponent += 1
    return f"{round(size, 2)} {units[exponent]}B"
121 |
122 |
def TimeFormatter(milliseconds: int) -> str:
    """Format a millisecond count as e.g. '1d, 2h, 3m, 4s, 5ms'.

    Zero-valued components are omitted; 0 ms yields an empty string.
    """
    seconds, milliseconds = divmod(int(milliseconds), 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    pieces = []
    for value, suffix in (
        (days, "d"),
        (hours, "h"),
        (minutes, "m"),
        (seconds, "s"),
        (milliseconds, "ms"),
    ):
        if value:
            pieces.append(f"{value}{suffix}")
    return ", ".join(pieces)
136 |
--------------------------------------------------------------------------------
/tobrot/helper_funcs/split_large_files.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # (c) Akshay C / Shrimadhav U K / YK
4 |
5 | import asyncio
6 | import logging
7 | import os
8 | import time
9 |
10 | from hachoir.metadata import extractMetadata
11 | from hachoir.parser import createParser
12 | from tobrot import LOGGER, MAX_TG_SPLIT_FILE_SIZE, SP_LIT_ALGO_RITH_M
13 |
14 |
async def split_large_files(input_file):
    """Split *input_file* into Telegram-sized parts inside a fresh directory.

    Videos (by extension) are cut on time boundaries with ffmpeg; other
    files are split bytewise with `split` or packed into multi-volume RAR,
    depending on SP_LIT_ALGO_RITH_M.  Returns the directory holding the
    parts; the original file is removed on success (best effort).
    """
    working_directory = os.path.dirname(os.path.abspath(input_file))
    # timestamp-named scratch dir keeps concurrent splits from colliding
    new_working_directory = os.path.join(working_directory, str(time.time()))
    # create download directory, if not exist
    if not os.path.isdir(new_working_directory):
        os.makedirs(new_working_directory)
    # if input_file.upper().endswith(("MKV", "MP4", "WEBM", "MP3", "M4A", "FLAC", "WAV")):
    """The below logic is DERPed, so removing temporarily
    """
    if input_file.upper().endswith(("MKV", "MP4", "WEBM", "AVI", "MOV", "OGG", "WMV", "M4V", "TS", "MPG", "MTS", "M2TS", "3GP")):
        # handle video / audio files here
        # NOTE(review): extractMetadata/createParser can return None for an
        # unreadable file, which would raise AttributeError below — confirm.
        metadata = extractMetadata(createParser(input_file))
        total_duration = 0
        if metadata.has("duration"):
            total_duration = metadata.get("duration").seconds
        # proprietary logic to get the seconds to trim (at)
        LOGGER.info(total_duration)
        total_file_size = os.path.getsize(input_file)
        LOGGER.info(total_file_size)
        # seconds-per-part so each part is roughly MAX_TG_SPLIT_FILE_SIZE bytes
        minimum_duration = (total_duration / total_file_size) * (MAX_TG_SPLIT_FILE_SIZE)
        # casting to int cuz float Time Stamp can cause errors
        minimum_duration = int(minimum_duration)

        LOGGER.info(minimum_duration)
        # END: proprietary
        # NOTE(review): minimum_duration == 0 (e.g. zero-duration metadata)
        # would leave end_time stuck and loop forever — verify inputs.
        start_time = 0
        end_time = minimum_duration
        base_name = os.path.basename(input_file)
        input_extension = base_name.split(".")[-1]
        LOGGER.info(input_extension)

        i = 0
        # flag marks that the final (shortened) part has been scheduled
        flag = False

        while end_time <= total_duration:
            LOGGER.info(i)
            # file name generate
            parted_file_name = "{}_PART_{}.{}".format(
                str(base_name), str(i).zfill(5), str(input_extension)
            )

            output_file = os.path.join(new_working_directory, parted_file_name)
            LOGGER.info(output_file)
            LOGGER.info(
                await cult_small_video(
                    input_file, output_file, str(start_time), str(end_time)
                )
            )
            LOGGER.info(f"Start time {start_time}, End time {end_time}, Itr {i}")

            # adding offset of 3 seconds to ensure smooth playback
            start_time = end_time - 3
            end_time = end_time + minimum_duration
            i = i + 1

            if (end_time > total_duration) and not flag:
                end_time = total_duration
                flag = True
            elif flag:
                break

    elif SP_LIT_ALGO_RITH_M.lower() == "hjs":
        # handle normal files here
        # output name template: original name + "." + numeric suffix
        o_d_t = os.path.join(new_working_directory, os.path.basename(input_file))
        o_d_t = o_d_t + "."
        file_genertor_command = [
            "split",
            "--numeric-suffixes=1",
            "--suffix-length=5",
            f"--bytes={MAX_TG_SPLIT_FILE_SIZE}",
            input_file,
            o_d_t,
        ]
        await run_comman_d(file_genertor_command)

    elif SP_LIT_ALGO_RITH_M.lower() == "rar":
        # multi-volume store-only (-m0) RAR, each volume MAX_TG_SPLIT_FILE_SIZE bytes
        o_d_t = os.path.join(
            new_working_directory,
            os.path.basename(input_file),
        )
        LOGGER.info(o_d_t)
        file_genertor_command = [
            "rar",
            "a",
            f"-v{MAX_TG_SPLIT_FILE_SIZE}b",
            "-m0",
            o_d_t,
            input_file,
        ]
        await run_comman_d(file_genertor_command)
    # best-effort removal of the original; parts now live in the scratch dir
    try:
        os.remove(input_file)
    except Exception as r:
        LOGGER.error(r)
    return new_working_directory
110 |
111 |
async def cult_small_video(video_file, out_put_file_name, start_time, end_time):
    """Cut [start_time, end_time] out of *video_file* with ffmpeg stream copy.

    Returns the output file name (ffmpeg's exit status is not checked).
    """
    command = [
        "ffmpeg",
        "-hide_banner",
        "-i", video_file,
        "-ss", start_time,
        "-to", end_time,
        "-async", "1",
        "-strict", "-2",
        "-c", "copy",
        out_put_file_name,
    ]
    # capture both streams so ffmpeg's chatter doesn't hit our stdout/stderr
    proc = await asyncio.create_subprocess_exec(
        *command,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    out_bytes, _err_bytes = await proc.communicate()
    LOGGER.info(out_bytes.decode().strip())
    return out_put_file_name
142 |
143 |
async def run_comman_d(command_list):
    """Run an external command; return its stripped (stdout, stderr) text."""
    proc = await asyncio.create_subprocess_exec(
        *command_list,
        # pipes make the output accessible via communicate()
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    raw_out, raw_err = await proc.communicate()
    return raw_out.decode().strip(), raw_err.decode().strip()
156 |
--------------------------------------------------------------------------------
/tobrot/plugins/call_back_button_handler.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # (c) Shrimadhav U K | gautamajay52 | MaxxRider
4 |
5 | import logging
6 | import os
7 | import shutil
8 |
9 | from pyrogram.types import CallbackQuery
10 | from tobrot import AUTH_CHANNEL, MAX_MESSAGE_LENGTH, LOGGER, gDict
11 | from tobrot.helper_funcs.admin_check import AdminCheck
12 | from tobrot.helper_funcs.download_aria_p_n import aria_start
13 | from tobrot.helper_funcs.youtube_dl_button import youtube_dl_call_back
14 | from tobrot.plugins.choose_rclone_config import rclone_button_callback
15 | from tobrot.plugins.status_message_fn import cancel_message_f
16 |
17 |
async def button(bot, update: CallbackQuery):
    """Dispatch inline-button callbacks: cancel transfers, youtube-dl format
    picks, rclone config choice, aria2 cancellation, and workdir cleanup."""
    cb_data = update.data
    # FIX: `g` was referenced below even when AdminCheck raised (the except
    # only logged), producing a NameError; default to "not admin".
    g = False
    try:
        g = await AdminCheck(bot, update.message.chat.id, update.from_user.id)
    except Exception as ee:
        LOGGER.info(ee)
    if cb_data.startswith("gUPcancel"):
        # payload: gUPcancel/<chat_id>/<message_id>/<owner_user_id>
        cmf = cb_data.split("/")
        chat_id, mes_id, from_usr = cmf[1], cmf[2], cmf[3]
        # only the transfer's owner or an admin may cancel
        if (int(update.from_user.id) == int(from_usr)) or g:
            await bot.answer_callback_query(
                update.id, text="Trying to cancel...", show_alert=False
            )
            # Progress.is_cancelled polls this registry
            gDict[int(chat_id)].append(int(mes_id))
        else:
            await bot.answer_callback_query(
                callback_query_id=update.id,
                text="This Is Not Your Leech. So, dont touch on this...😡😡",
                show_alert=True,
                cache_time=0,
            )
        return
    if "|" in cb_data:
        # youtube-dl format selection payloads contain a "|"
        await bot.answer_callback_query(
            update.id, text="trying to download...", show_alert=False
        )
        await youtube_dl_call_back(bot, update)
        return
    if cb_data.startswith("rclone"):
        await bot.answer_callback_query(
            update.id, text="choose rclone config...", show_alert=False
        )
        await rclone_button_callback(bot, update)
        return
    # todo - remove this code if not needed in future
    if cb_data.startswith("cancel"):
        # allowed for the user who started the leech, or any admin
        if (update.from_user.id == update.message.reply_to_message.from_user.id) or g:
            await bot.answer_callback_query(
                update.id, text="trying to cancel...", show_alert=False
            )
            if len(cb_data) > 1:
                i_m_s_e_g = await update.message.reply_to_message.reply_text(
                    "checking..?", quote=True
                )
                aria_i_p = await aria_start()
                g_id = cb_data.split()[-1]
                LOGGER.info(g_id)
                try:
                    downloads = aria_i_p.get_download(g_id)
                    file_name = downloads.name
                    # force-remove from aria2 and clean its control files
                    LOGGER.info(
                        aria_i_p.remove(
                            downloads=[downloads], force=True, files=True, clean=True
                        )
                    )
                    # remove whatever was already written to disk
                    if os.path.exists(file_name):
                        if os.path.isdir(file_name):
                            shutil.rmtree(file_name)
                        else:
                            os.remove(file_name)
                    await i_m_s_e_g.edit_text(
                        f"Leech Cancelled by {update.from_user.first_name}"
                    )
                except Exception as e:
                    await i_m_s_e_g.edit_text("FAILED\n\n" + str(e) + "\n#error")
        else:
            await bot.answer_callback_query(
                callback_query_id=update.id,
                text="who are you? 🤪🤔🤔🤔",
                show_alert=True,
                cache_time=0,
            )
    elif cb_data == "fuckingdo":
        # wipe everything in the working dir that is not on the keep-list
        if (update.from_user.id in AUTH_CHANNEL) or g:
            await bot.answer_callback_query(
                update.id, text="trying to delete...", show_alert=False
            )
            # files/dirs belonging to the bot installation — never delete
            g_d_list = [
                "app.json",
                "venv",
                "rclone.conf",
                "rclone_bak.conf",
                ".gitignore",
                "_config.yml",
                "COPYING",
                "Dockerfile",
                "extract",
                "Procfile",
                ".heroku",
                ".profile.d",
                "rclone.jpg",
                "README.md",
                "requirements.txt",
                "runtime.txt",
                "start.sh",
                "tobrot",
                "gautam",
                "Torrentleech-Gdrive.txt",
                "vendor",
                "LeechBot.session",
                "LeechBot.session-journal",
                "config.env",
                "sample_config.env",
            ]
            g_list = os.listdir()
            LOGGER.info(g_list)
            g_del_list = list(set(g_list) - set(g_d_list))
            LOGGER.info(g_del_list)
            if len(g_del_list) != 0:
                for f in g_del_list:
                    if os.path.isfile(f):
                        os.remove(f)
                    else:
                        shutil.rmtree(f)
                await update.message.edit_text(f"Deleted {len(g_del_list)} objects 🚮")
            else:
                await update.message.edit_text("Nothing to clear 🙄")
        else:
            await update.message.edit_text("You are not allowed to do that 🤭")
    elif cb_data == "fuckoff":
        await bot.answer_callback_query(
            update.id, text="trying to cancel...", show_alert=False
        )
        await update.message.edit_text("Okay! fine 🤬")
--------------------------------------------------------------------------------
/tobrot/helper_funcs/create_compressed_archive.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # (c) Shrimadhav U K | gautamajay52
4 |
5 | import asyncio
6 | import logging
7 | import os
8 | import shutil
9 | import subprocess
10 |
11 | from tobrot import LOGGER
12 |
13 |
async def create_archive(input_directory):
    """Tar+gzip *input_directory*; return the archive name or None on failure.

    On success the source directory is removed (best effort).
    """
    return_name = None
    if os.path.exists(input_directory):
        base_dir_name = os.path.basename(input_directory)
        compressed_file_name = f"{base_dir_name}.tar.gz"
        # #BlameTelegram — names are capped at 64 chars;
        # len(".tar.gz") == 7 == 1 + 3 + 1 + 2
        suffix_extention_length = 1 + 3 + 1 + 2
        if len(base_dir_name) > (64 - suffix_extention_length):
            compressed_file_name = base_dir_name[0 : (64 - suffix_extention_length)]
            compressed_file_name += ".tar.gz"
        # fix for https://t.me/c/1434259219/13344
        file_genertor_command = [
            "tar",
            "-zcvf",
            compressed_file_name,
            f"{input_directory}",
        ]
        process = await asyncio.create_subprocess_exec(
            *file_genertor_command,
            # pipes make the output accessible via communicate()
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
        )
        # Wait for the subprocess to finish
        stdout, stderr = await process.communicate()
        LOGGER.error(stderr.decode().strip())
        if os.path.exists(compressed_file_name):
            try:
                shutil.rmtree(input_directory)
            except OSError as err:
                # FIX: was a silent bare except — log the cleanup failure
                # (NotADirectoryError for single-file inputs is an OSError too)
                LOGGER.error(err)
            return_name = compressed_file_name
    return return_name
47 |
48 |
49 | # @gautamajay52
50 |
51 |
async def unzip_me(input_directory):
    """Extract an archive via the bundled ./extract script.

    Returns the extraction's base name if it now exists, else None; the
    source archive is removed on success (best effort).
    """
    return_name = None
    if os.path.exists(input_directory):
        base_dir_name = os.path.basename(input_directory)
        # strip the known archive extension to predict the output name
        uncompressed_file_name = get_base_name(base_dir_name)
        LOGGER.info(uncompressed_file_name)
        g_cmd = ["./extract", f"{input_directory}"]
        process = await asyncio.create_subprocess_exec(
            *g_cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
        )
        # Wait for the subprocess to finish
        stdout, stderr = await process.communicate()
        LOGGER.info(stdout.decode().strip())
        LOGGER.info(stderr.decode().strip())
        if os.path.exists(uncompressed_file_name):
            try:
                os.remove(input_directory)
            except OSError as err:
                # FIX: was a silent bare except — log the cleanup failure
                LOGGER.error(err)
            return_name = uncompressed_file_name
    return return_name
74 |
75 |
76 | #
77 |
78 |
async def untar_me(input_directory):
    """Extract a tar archive into a directory named after the archive.

    Returns that directory name if it exists afterwards, else None; the
    archive is removed on success (best effort).
    """
    return_name = None
    if os.path.exists(input_directory):
        LOGGER.info(input_directory)
        base_dir_name = os.path.basename(input_directory)
        uncompressed_file_name = os.path.splitext(base_dir_name)[0]
        # FIX: the old `mkdir` subprocess was spawned but never awaited to
        # completion, racing with tar's -C; create the directory synchronously.
        os.makedirs(uncompressed_file_name, exist_ok=True)
        g_cmd_t = [
            "tar",
            "-xvf",
            # FIX: was hard-coded f"/app/{base_dir_name}", which only worked
            # when the archive happened to live in /app — use the real path.
            input_directory,
            "-C",
            f"{uncompressed_file_name}",
        ]
        process = await asyncio.create_subprocess_exec(
            *g_cmd_t, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
        )
        # Wait for the subprocess to finish
        stdout, stderr = await process.communicate()
        LOGGER.info(stdout)
        LOGGER.info(stderr)
        if os.path.exists(uncompressed_file_name):
            try:
                os.remove(input_directory)
            except OSError as err:
                # FIX: was a silent bare except — log the cleanup failure
                LOGGER.error(err)
            return_name = uncompressed_file_name
        LOGGER.info(return_name)
    return return_name
113 |
114 |
115 | #
116 |
117 |
async def unrar_me(input_directory):
    """Extract a rar archive into a directory named after the archive.

    Returns that directory name if it exists afterwards, else None; the
    archive is removed on success (best effort).
    """
    return_name = None
    if os.path.exists(input_directory):
        base_dir_name = os.path.basename(input_directory)
        uncompressed_file_name = os.path.splitext(base_dir_name)[0]
        # FIX: the old `mkdir` subprocess was spawned but never awaited to
        # completion, racing with unrar; create the directory synchronously.
        os.makedirs(uncompressed_file_name, exist_ok=True)
        LOGGER.info(base_dir_name)
        # FIX: was f"{base_dir_name}" (cwd-relative) — pass the actual path.
        gau_tam_r = ["unrar", "x", input_directory, f"{uncompressed_file_name}"]
        process = await asyncio.create_subprocess_exec(
            *gau_tam_r, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
        )
        # Wait for the subprocess to finish
        stdout, stderr = await process.communicate()
        LOGGER.info(stdout)
        LOGGER.info(stderr)
        if os.path.exists(uncompressed_file_name):
            try:
                os.remove(input_directory)
            except OSError as err:
                # FIX: was a silent bare except — log the cleanup failure
                LOGGER.error(err)
            return_name = uncompressed_file_name
        LOGGER.info(return_name)
    return return_name
146 |
147 |
def get_base_name(orig_path: str):
    """Strip a known archive extension from *orig_path*.

    Raises Exception for unsupported formats.

    FIXes over the original chain of str.replace calls:
      * ".tbz2" replaced "tbz2" (no dot), leaving a trailing "." behind;
      * str.replace removes EVERY occurrence, so names that repeat the
        extension (e.g. "x.tar.gz.tar.gz") were over-truncated.
    Slicing off only the trailing suffix fixes both.
    """
    # compound suffixes first so ".tar.gz" isn't matched as ".gz"
    known_suffixes = (
        ".tar.bz2",
        ".tar.gz",
        ".tbz2",
        ".tgz",
        ".tar",
        ".bz2",
        ".gz",
        ".zip",
        ".7z",
        ".Z",
        ".rar",
    )
    for suffix in known_suffixes:
        if orig_path.endswith(suffix):
            return orig_path[: -len(suffix)]
    raise Exception("File format not supported for extraction")
173 |
--------------------------------------------------------------------------------
/tobrot/helper_funcs/direct_link_generator.py:
--------------------------------------------------------------------------------
1 | # Copyright (C) 2019 The Raphielscape Company LLC.
2 | #
3 | # Licensed under the Raphielscape Public License, Version 1.c (the "License");
4 | # you may not use this file except in compliance with the License.
5 | #
6 | """ Helper Module containing various sites direct links generators. This module is copied and modified as per need
7 | from https://github.com/AvinashReddy3108/PaperplaneExtended . I hereby take no credit of the following code other
8 | than the modifications. See https://github.com/AvinashReddy3108/PaperplaneExtended/commits/master/userbot/modules/direct_links.py
9 | for original authorship. """
10 |
11 | import json
12 | import re
13 | import urllib.parse
14 | from os import popen
15 | from random import choice
16 | from js2py import EvalJs
17 | import requests
18 | from bs4 import BeautifulSoup
19 |
20 | from tobrot.helper_funcs.exceptions import DirectDownloadLinkException
21 |
22 |
def direct_link_generator(text_url: str):
    """Dispatch *text_url* to the matching hoster-specific link generator."""
    if not text_url:
        raise DirectDownloadLinkException("`No links found!`")
    # (hostname fragment, handler) pairs, checked in order.
    handlers = (
        ('zippyshare.com', zippy_share),
        ('yadi.sk', yandex_disk),
        ('cloud.mail.ru', cm_ru),
        ('mediafire.com', mediafire),
        ('osdn.net', osdn),
        ('github.com', github),
        ('racaty.net', racaty),
    )
    for marker, handler in handlers:
        if marker in text_url:
            return handler(text_url)
    raise DirectDownloadLinkException(f'No Direct link function found for {text_url}')
43 |
44 |
def zippy_share(url: str) -> str:
    """Zippyshare direct link generator.

    Scrapes the download page, extracts the inline JavaScript that computes
    the dynamic href, evaluates it with js2py, and rebuilds the full URL.
    """
    # Subdomain (e.g. "www12") — the download host is per-subdomain.
    link = re.findall("https:/.(.*?).zippyshare", url)[0]
    response_content = (requests.get(url)).content
    bs_obj = BeautifulSoup(response_content, "lxml")

    # The script holding the href logic lives in different containers
    # depending on the page layout variant served.
    try:
        js_script = bs_obj.find("div", {"class": "center",}).find_all(
            "script"
        )[1]
    except:
        js_script = bs_obj.find("div", {"class": "right",}).find_all(
            "script"
        )[0]

    # Pull the right-hand side of the `.href = "/..."` assignment and wrap
    # it so js2py can evaluate the obfuscated arithmetic into a path.
    js_content = re.findall(r'\.href.=."/(.*?)";', str(js_script))
    js_content = 'var x = "/' + js_content[0] + '"'

    evaljs = EvalJs()
    setattr(evaljs, "x", None)
    evaljs.execute(js_content)
    js_content = getattr(evaljs, "x")

    return f"https://{link}.zippyshare.com{js_content}"
68 |
69 |
def yandex_disk(url: str) -> str:
    """ Yandex.Disk direct links generator
    Based on https://github.com/wldhx/yadisk-direct"""
    try:
        text_url = re.findall(r'\bhttps?://.*yadi\.sk\S+', url)[0]
    except IndexError:
        # Raise instead of returning a human-readable string: every other
        # generator in this module signals failure via
        # DirectDownloadLinkException, and callers treat the return value
        # as a usable download URL.
        raise DirectDownloadLinkException("`No Yandex.Disk links found`\n")
    api = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key={}'
    try:
        # The API answers with JSON whose 'href' is the direct link.
        return requests.get(api.format(text_url)).json()['href']
    except KeyError:
        raise DirectDownloadLinkException("`Error: File not found / Download limit reached`\n")
84 |
85 |
def cm_ru(url: str) -> str:
    """ cloud.mail.ru direct links generator
    Using https://github.com/JrMasterModelBuilder/cmrudl.py"""
    import subprocess  # local import keeps the module import block untouched
    try:
        text_url = re.findall(r'\bhttps?://.*cloud\.mail\.ru\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No cloud.mail.ru links found`\n")
    # Argument list + shell=False so a crafted URL cannot inject shell
    # syntax (the original interpolated the URL into an os.popen() command
    # string).
    result = subprocess.run(
        ['vendor/cmrudl.py/cmrudl', '-s', text_url],
        capture_output=True,
        text=True,
        check=False,
    ).stdout
    # cmrudl prints progress lines first; the JSON payload is the last line.
    result = result.splitlines()[-1]
    try:
        data = json.loads(result)
    except json.decoder.JSONDecodeError:
        raise DirectDownloadLinkException("`Error: Can't extract the link`\n")
    return data['download']
103 |
104 |
def mediafire(url: str) -> str:
    """ MediaFire direct links generator """
    matches = re.findall(r'\bhttps?://.*mediafire\.com\S+', url)
    if not matches:
        raise DirectDownloadLinkException("`No MediaFire links found`\n")
    # The direct link sits on the "Download file" anchor of the page.
    soup = BeautifulSoup(requests.get(matches[0]).content, 'lxml')
    anchor = soup.find('a', {'aria-label': 'Download file'})
    return anchor.get('href')
115 |
116 |
def osdn(url: str) -> str:
    """ OSDN direct links generator """
    osdn_link = 'https://osdn.net'
    matches = re.findall(r'\bhttps?://.*osdn\.net\S+', url)
    if not matches:
        raise DirectDownloadLinkException("`No OSDN links found`\n")
    page = BeautifulSoup(
        requests.get(matches[0], allow_redirects=True).content, 'lxml')
    # Base download URL taken from the page's mirror link.
    info = page.find('a', {'class': 'mirror_link'})
    text_url = urllib.parse.unquote(osdn_link + info['href'])
    # Build one URL per mirror by swapping the m= query parameter, then
    # hand back the first.
    mirrors = page.find('form', {'id': 'mirror-select-form'}).findAll('tr')
    urls = [
        re.sub(r'm=(.*)&f', 'm={}&f'.format(row.find('input')['value']), text_url)
        for row in mirrors[1:]
    ]
    return urls[0]
134 |
135 |
def github(url: str) -> str:
    """ GitHub direct links generator """
    matches = re.findall(r'\bhttps?://.*github\.com.*releases\S+', url)
    if not matches:
        raise DirectDownloadLinkException("`No GitHub Releases links found`\n")
    # Releases redirect to the asset CDN; grab the Location header instead
    # of following the redirect.
    response = requests.get(matches[0], stream=True, allow_redirects=False)
    try:
        return response.headers["location"]
    except KeyError:
        raise DirectDownloadLinkException("`Error: Can't extract the link`\n")
148 |
149 |
def useragent():
    """
    useragent random setter
    """
    # Scrape a list of Android user agents and pick one at random.
    source = requests.get(
        'https://developers.whatismybrowser.com/'
        'useragents/explore/operating_system_name/android/').content
    cells = BeautifulSoup(source, 'lxml').findAll('td', {'class': 'useragent'})
    return choice(cells).text
161 |
def racaty(url: str) -> str:
    """ Racaty direct links generator """
    try:
        text_url = re.findall(r'\bhttps?://.*racaty\.net\S+', url)[0]
    except IndexError:
        raise DirectDownloadLinkException("`No Racaty links found`\n")
    response = requests.get(text_url)
    page = BeautifulSoup(response.text, 'html.parser')
    # Re-submit the page's own hidden form fields to obtain the expiring
    # direct link.
    op = page.find('input', {'name': 'op'})['value']
    file_id = page.find('input', {'name': 'id'})['value']  # renamed: `id` shadowed the builtin
    post_response = requests.post(text_url, data={'op': op, 'id': file_id})
    result_page = BeautifulSoup(post_response.text, 'html.parser')
    return result_page.find('a', {'id': 'uniqueExpirylink'})['href']
--------------------------------------------------------------------------------
/tobrot/helper_funcs/cloneHelper.py:
--------------------------------------------------------------------------------
1 | # This is code to clone the gdrive link using the gclone, all credit goes to the developer who has developed the rclone/glclone
2 | #!/usr/bin/env python3
3 | # -*- coding: utf-8 -*-
4 | # (c) gautamajay52
5 | # (c) MaxxRider
6 |
7 | import asyncio
8 | import logging
9 | import os
10 | import re
11 | import subprocess
12 |
13 | import pyrogram.types as pyrogram
14 | import requests
15 | from tobrot import (
16 | DESTINATION_FOLDER,
17 | DOWNLOAD_LOCATION,
18 | EDIT_SLEEP_TIME_OUT,
19 | INDEX_LINK,
20 | LOGGER,
21 | RCLONE_CONFIG,
22 | TG_MAX_FILE_SIZE,
23 | UPLOAD_AS_DOC,
24 | )
25 |
26 |
27 | class CloneHelper:
    def __init__(self, mess):
        """Initialise clone state for the message that triggered the clone."""
        self.g_id = ""  # Google Drive id, parsed later by get_id()
        self.mess = mess  # triggering pyrogram message
        self.name = ""  # file/folder display name, parsed later by get_id()
        self.out = b""  # presumably raw subprocess stdout — TODO confirm
        self.err = b""  # presumably raw subprocess stderr — TODO confirm
        self.lsg = ""
        self.filee = ""
        self.u_id = self.mess.from_user.id  # id of the requesting user
        self.dname = ""  # rclone remote name, filled in by config()
38 |
39 | def config(self):
40 | if not os.path.exists("rclone.conf"):
41 | with open("rclone.conf", "w+", newline="\n", encoding="utf-8") as fole:
42 | fole.write(f"{RCLONE_CONFIG}")
43 | if os.path.exists("rclone.conf"):
44 | with open("rclone.conf", "r+") as file:
45 | con = file.read()
46 | self.dname = re.findall("\[(.*)\]", con)[0]
47 |
48 | def get_id(self):
49 | mes = self.mess
50 | txt = mes.reply_to_message.text
51 | LOGGER.info(txt)
52 | mess = txt.split(" ", maxsplit=1)
53 | if len(mess) == 2:
54 | self.g_id = mess[0]
55 | LOGGER.info(self.g_id)
56 | self.name = mess[1]
57 | LOGGER.info(self.name)
58 | else:
59 | self.g_id = mess[0]
60 | LOGGER.info(self.g_id)
61 | self.name = ""
62 | return self.g_id, self.name
63 |
64 | async def link_gen_size(self):
65 | if self.name is not None:
66 | _drive = ""
67 | if self.name == self.filee:
68 | _flag = "--files-only"
69 | _up = "File"
70 | _ui = ""
71 | else:
72 | _flag = "--dirs-only"
73 | _up = "Folder"
74 | _drive = "folderba"
75 | _ui = "/"
76 | g_name = re.escape(self.name)
77 | LOGGER.info(g_name)
78 | destination = f"{DESTINATION_FOLDER}"
79 |
80 | with open("filter1.txt", "w+", encoding="utf-8") as filter1:
81 | print(f"+ {g_name}{_ui}\n- *", file=filter1)
82 |
83 | g_a_u = [
84 | "rclone",
85 | "lsf",
86 | "--config=./rclone.conf",
87 | "-F",
88 | "i",
89 | "--filter-from=./filter1.txt",
90 | f"{_flag}",
91 | f"{self.dname}:{destination}",
92 | ]
93 | LOGGER.info(g_a_u)
94 | gau_tam = await asyncio.create_subprocess_exec(
95 | *g_a_u, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
96 | )
97 | gau, tam = await gau_tam.communicate()
98 | LOGGER.info(gau)
99 | gautam = gau.decode("utf-8")
100 | LOGGER.info(gautam)
101 | LOGGER.info(tam.decode("utf-8"))
102 |
103 | if _drive == "folderba":
104 | gautii = f"https://drive.google.com/folderview?id={gautam}"
105 | else:
106 | gautii = f"https://drive.google.com/file/d/{gautam}/view?usp=drivesdk"
107 |
108 | LOGGER.info(gautii)
109 | gau_link = re.search("(?P
209 |
210 |
211 |
212 |
220 |
221 |
222 |
223 |
224 |
225 |
226 |
227 |
228 |
229 |
230 |
231 |
232 |
233 |
234 |
235 |
236 |
237 |
238 |
239 |
240 |
241 |
259 |
260 |
261 |
262 |
263 |
264 |
265 |
266 |
267 |
268 |
269 |
270 |
271 |
272 |
273 |
274 |
275 |
276 |
/cancel {file.gid}"
96 | msg += "\n"
97 |
98 | hr, mi, se = up_time(time.time() - BOT_START_TIME)
99 | total, used, free = shutil.disk_usage(".")
100 | ram = psutil.virtual_memory().percent
101 | cpu = psutil.cpu_percent()
102 | total = humanbytes(total)
103 | used = humanbytes(used)
104 | free = humanbytes(free)
105 |
106 | ms_g = (
107 | f"Bot Uptime: {hr} : {mi} : {se}\n"
108 | f"T: {total} U: {used} F: {free}\n"
109 | f"RAM: {ram}% CPU: {cpu}%\n"
110 | )
111 | if msg == "":
112 | msg = "🤷♂️ No Active, Queued or Paused TORRENTs"
113 | msg = ms_g + "\n" + msg
114 | await to_edit.edit(msg)
115 | break
116 | msg = msg + "\n" + ms_g
117 | if len(msg) > MAX_MESSAGE_LENGTH: # todo - will catch later
118 | with io.BytesIO(str.encode(msg)) as out_file:
119 | out_file.name = "status.text"
120 | await client.send_document(
121 | chat_id=message.chat.id,
122 | document=out_file,
123 | )
124 | break
125 | else:
126 | if msg != prev_mess:
127 | try:
128 | await to_edit.edit(msg, parse_mode="html")
129 | except MessageIdInvalid as df:
130 | break
131 | except MessageNotModified as ep:
132 | LOGGER.info(ep)
133 | await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
134 | except FloodWait as e:
135 | LOGGER.info(e)
136 | time.sleep(e.x)
137 | await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
138 | prev_mess = msg
139 |
140 |
async def cancel_message_f(client, message):
    """/cancel <gid>: force-remove an aria2 download and any it spawned."""
    if len(message.command) <= 1:
        # Bare /cancel with no gid: just drop the command message.
        await message.delete()
        return
    status_msg = await message.reply_text("checking..?", quote=True)
    aria_api = await aria_start()
    g_id = message.command[1].strip()
    LOGGER.info(g_id)
    try:
        download = aria_api.get_download(g_id)
        name = download.name
        size = download.total_length_string()
        follower_gids = download.followed_by_ids
        targets = [download]
        if len(follower_gids) != 0:
            # A metadata download spawned real downloads; cancel those too.
            targets = aria_api.get_downloads(follower_gids)
        aria_api.remove(downloads=targets, force=True, files=True, clean=True)
        await status_msg.edit_text(
            f"Download cancelled :\n{name} ({size}) by {message.from_user.first_name}"
        )
    except Exception as e:
        await status_msg.edit_text("FAILED\n\n" + str(e) + "\n#error")
164 |
165 |
async def exec_message_f(client, message):
    """/exec: run an arbitrary shell command (AUTH_CHANNEL members only)
    and reply with its stdout/stderr; oversized output goes out as a file.
    """
    if message.from_user.id in AUTH_CHANNEL:
        # Everything after the command word is the shell command line.
        cmd = message.text.split(" ", maxsplit=1)[1]

        reply_to_id = message.message_id
        if message.reply_to_message:
            reply_to_id = message.reply_to_message.message_id

        # Deliberate arbitrary-command execution, gated on AUTH_CHANNEL
        # above. (Removed unused DELAY_BETWEEN_EDITS / PROCESS_RUN_TIME /
        # start_time locals from the original.)
        process = await asyncio.create_subprocess_shell(
            cmd, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
        )
        stdout, stderr = await process.communicate()
        e = stderr.decode()
        if not e:
            e = "No Error"
        o = stdout.decode()
        if not o:
            o = "No Output"
        else:
            _o = o.split("\n")
            o = "`\n".join(_o)
        OUTPUT = f"**QUERY:**\n__Command:__\n`{cmd}` \n__PID:__\n`{process.pid}`\n\n**stderr:** \n`{e}`\n**Output:**\n{o}"

        # Telegram messages are length-capped; ship long output as a file.
        if len(OUTPUT) > MAX_MESSAGE_LENGTH:
            with io.BytesIO(str.encode(OUTPUT)) as out_file:
                out_file.name = "exec.text"
                await client.send_document(
                    chat_id=message.chat.id,
                    document=out_file,
                    caption=cmd,
                    disable_notification=True,
                    reply_to_message_id=reply_to_id,
                )
            await message.delete()
        else:
            await message.reply_text(OUTPUT)
205 |
206 |
async def upload_document_f(client, message):
    """/upload <path>: upload a local file to Telegram (AUTH_CHANNEL only)."""
    imsegd = await message.reply_text("processing ...")
    if message.from_user.id in AUTH_CHANNEL and " " in message.text:
        # Only the part after the first space matters: the local file path.
        _, local_file_name = message.text.split(" ", 1)
        recvd_response = await upload_to_tg(
            imsegd, local_file_name, message.from_user.id, {}, client
        )
        LOGGER.info(recvd_response)
    # Always clear the "processing ..." placeholder — the original left it
    # dangling for unauthorized or malformed requests.
    await imsegd.delete()
217 |
218 |
async def eval_message_f(client, message):
    """/eval: execute the given Python snippet in-process (AUTH_CHANNEL
    members only) and reply with its output, stderr, or traceback; output
    too long for a message is sent as a document instead."""
    if message.from_user.id in AUTH_CHANNEL:
        status_message = await message.reply_text("Processing ...")
        # Everything after the command word is the Python snippet to run.
        cmd = message.text.split(" ", maxsplit=1)[1]

        reply_to_id = message.message_id
        if message.reply_to_message:
            reply_to_id = message.reply_to_message.message_id

        # Temporarily swap out stdout/stderr so anything the snippet prints
        # is captured and can be relayed back to the chat.
        old_stderr = sys.stderr
        old_stdout = sys.stdout
        redirected_output = sys.stdout = io.StringIO()
        redirected_error = sys.stderr = io.StringIO()
        stdout, stderr, exc = None, None, None

        try:
            await aexec(cmd, client, message)
        except Exception:
            exc = traceback.format_exc()

        # Collect captured streams, then restore the real ones.
        stdout = redirected_output.getvalue()
        stderr = redirected_error.getvalue()
        sys.stdout = old_stdout
        sys.stderr = old_stderr

        # Report priority: traceback, then stderr, then stdout.
        evaluation = ""
        if exc:
            evaluation = exc
        elif stderr:
            evaluation = stderr
        elif stdout:
            evaluation = stdout
        else:
            evaluation = "Success"

        final_output = (
            "EVAL: {}\n\nOUTPUT:\n{} \n".format(
                cmd, evaluation.strip()
            )
        )

        # Telegram messages are length-capped; ship long output as a file.
        if len(final_output) > MAX_MESSAGE_LENGTH:
            with open("eval.text", "w+", encoding="utf8") as out_file:
                out_file.write(str(final_output))
            await message.reply_document(
                document="eval.text",
                caption=cmd,
                disable_notification=True,
                reply_to_message_id=reply_to_id,
            )
            os.remove("eval.text")
            await status_message.delete()
        else:
            await status_message.edit(final_output)
273 |
274 |
async def aexec(code, client, message):
    """Compile *code* as the body of a temporary async function and await it.

    Each line is indented one space under the generated ``async def`` so
    the snippet may use ``return``/``await`` at its top level. Owner-only
    eval helper: executing untrusted input here would be arbitrary code
    execution.
    """
    indented_body = "".join(f"\n {line}" for line in code.split("\n"))
    exec(f"async def __aexec(client, message): {indented_body}")
    return await locals()["__aexec"](client, message)
281 |
282 |
def up_time(time_taken):
    """Split a duration in seconds into rounded (hours, minutes, seconds)."""
    hrs = time_taken // 3600
    mins, secs = divmod(time_taken % 3600, 60)
    return round(hrs), round(mins), round(secs)
287 |
288 |
async def upload_log_file(client, message):
    """Send the bot log file, but only to chat admins."""
    is_admin = await AdminCheck(client, message.chat.id, message.from_user.id)
    if not is_admin:
        return
    await message.reply_document("Torrentleech-Gdrive.txt")
--------------------------------------------------------------------------------
/tobrot/plugins/incoming_message_fn.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # (c) Shrimadhav U K | gautamajay52 | MaxxRider
4 |
5 | import asyncio
6 | import logging
7 | import os
8 | import time
9 | from pathlib import Path
10 | import aria2p
11 | import requests
12 | from tobrot import (
13 | DOWNLOAD_LOCATION,
14 | GLEECH_COMMAND,
15 | GLEECH_UNZIP_COMMAND,
16 | GLEECH_ZIP_COMMAND,
17 | LEECH_COMMAND,
18 | LEECH_UNZIP_COMMAND,
19 | LEECH_ZIP_COMMAND,
20 | LOGGER,
21 | YTDL_COMMAND,
22 | GPYTDL_COMMAND,
23 | PYTDL_COMMAND,
24 | )
25 | from tobrot.helper_funcs.admin_check import AdminCheck
26 | from tobrot.helper_funcs.cloneHelper import CloneHelper
27 | from tobrot.helper_funcs.download import download_tg
28 | from tobrot.helper_funcs.download_aria_p_n import (
29 | aria_start,
30 | call_apropriate_function,
31 | )
32 | from tobrot.helper_funcs.download_from_link import request_download
33 | from tobrot.helper_funcs.extract_link_from_message import extract_link
34 | from tobrot.helper_funcs.upload_to_tg import upload_to_tg
35 | from tobrot.helper_funcs.youtube_dl_extractor import extract_youtube_dl_formats
36 | from tobrot.helper_funcs.ytplaylist import yt_playlist_downg
37 |
38 |
async def incoming_purge_message_f(client, message):
    """/purge command: force-remove every download aria2 knows about
    (admins only). Removed the leftover ``print(message.client)`` debug
    statement from the original."""
    i_m_sefg2 = await message.reply_text("Purging...", quote=True)
    if await AdminCheck(client, message.chat.id, message.from_user.id):
        aria_i_p = await aria_start()
        # Show All Downloads
        downloads = aria_i_p.get_downloads()
        for download in downloads:
            LOGGER.info(download.remove(force=True))
    # Delete the placeholder even for non-admins — the original left
    # "Purging..." dangling when the admin check failed.
    await i_m_sefg2.delete()
50 |
51 |
async def incoming_message_f(client, message):
    """/leech command or /gleech command

    Routes the download source (reply with a link, a .torrent, other
    Telegram media, or a command argument) to call_apropriate_function,
    with zip/unzip/cloud flags derived from the exact command used.
    """
    user_command = message.command[0]
    g_id = message.from_user.id
    # get link from the incoming message
    i_m_sefg = await message.reply_text("Processing...", quote=True)
    rep_mess = message.reply_to_message
    is_file = False
    dl_url = ''
    cf_name = ''
    if rep_mess:
        file_name = ''
        if rep_mess.media:
            # First non-None media attachment wins (document/video/audio).
            file = [rep_mess.document, rep_mess.video, rep_mess.audio]
            file_name = [fi for fi in file if fi is not None][0].file_name
        if not rep_mess.media or str(file_name).lower().endswith(".torrent"):
            # Text reply or a .torrent attachment: extract a downloadable
            # link (and optional custom file name) from the message.
            dl_url, cf_name, _, _ = await extract_link(message.reply_to_message, "LEECH")
            LOGGER.info(dl_url)
            LOGGER.info(cf_name)
        else:
            # Any other Telegram media: only the g* variants accept it;
            # it is then mirrored as a file rather than downloaded by URL.
            if user_command == LEECH_COMMAND.lower():
                await i_m_sefg.edit("No download source provided 🙄")
                return
            is_file = True
            dl_url = rep_mess
    elif len(message.command) == 2:
        # Link passed directly as the command argument.
        dl_url = message.command[1]
        LOGGER.info(dl_url)

    else:
        await i_m_sefg.edit("Hey Dude !\n\n 🐈 Reply with Direct /Torrent Link")
        return
    if dl_url is not None:

        current_user_id = message.from_user.id
        # create an unique directory
        new_download_location = os.path.join(
            DOWNLOAD_LOCATION, str(current_user_id), str(time.time())
        )
        # create download directory, if not exist
        if not os.path.isdir(new_download_location):
            os.makedirs(new_download_location)
        aria_i_p = ''
        if not is_file:
            await i_m_sefg.edit_text("Extracting links...")
            # start the aria2c daemon
            aria_i_p = await aria_start()
            # LOGGER.info(aria_i_p)

        await i_m_sefg.edit_text("Added to downloads. Send /status")
        # try to download the "link"
        is_zip = False
        is_cloud = False
        is_unzip = False

        # Derive archive handling from the exact command variant.
        if user_command == LEECH_UNZIP_COMMAND.lower():
            is_unzip = True
        elif user_command == LEECH_ZIP_COMMAND.lower():
            is_zip = True

        # g* variants additionally upload to Google Drive.
        if user_command == GLEECH_COMMAND.lower():
            is_cloud = True
        if user_command == GLEECH_UNZIP_COMMAND.lower():
            is_cloud = True
            is_unzip = True
        elif user_command == GLEECH_ZIP_COMMAND.lower():
            is_cloud = True
            is_zip = True
        sagtus, err_message = await call_apropriate_function(
            aria_i_p,
            dl_url,
            new_download_location,
            i_m_sefg,
            is_zip,
            cf_name,
            is_cloud,
            is_unzip,
            is_file,
            message,
            client,
        )
        if not sagtus:
            # if FAILED, display the error message
            await i_m_sefg.edit_text(err_message)
    else:
        # extract_link found nothing usable in the replied-to message.
        await i_m_sefg.edit_text(
            f"**FCUK**! wat have you entered. \nAPI Error: {cf_name}"
        )
140 |
141 |
async def incoming_youtube_dl_f(client, message):
    """ /ytdl command

    Extracts a youtube-dl supported link (from a reply or the command
    argument), lists the available formats, and presents them as an inline
    keyboard, with the video thumbnail when one is available.
    """
    current_user_id = message.from_user.id

    i_m_sefg = await message.reply_text("Prrocessing...🔃", quote=True)
    # extract link from message
    if message.reply_to_message:
        dl_url, cf_name, yt_dl_user_name, yt_dl_pass_word = await extract_link(
            message.reply_to_message, "YTDL"
        )
        LOGGER.info(dl_url)
        LOGGER.info(cf_name)
    elif len(message.command) == 2:
        dl_url = message.command[1]
        LOGGER.info(dl_url)
        # No caption or credentials when the link comes straight from the
        # command arguments. (The original assigned cf_name = None twice.)
        cf_name = None
        yt_dl_user_name = None
        yt_dl_pass_word = None
    else:
        await i_m_sefg.edit("🐈 Oops Reply To YTDL Supported Link.")
        return
    if dl_url is not None:
        await i_m_sefg.edit_text("Getting Available Formate...")
        # create an unique directory
        user_working_dir = os.path.join(DOWNLOAD_LOCATION, str(current_user_id))
        # create download directory, if not exist
        if not os.path.isdir(user_working_dir):
            os.makedirs(user_working_dir)
        # list the formats, and display in button markup formats
        thumb_image, text_message, reply_markup = await extract_youtube_dl_formats(
            dl_url, cf_name, yt_dl_user_name, yt_dl_pass_word, user_working_dir
        )
        if thumb_image is not None:
            # Fetch the thumbnail locally so it can be sent as a photo.
            req = requests.get(f"{thumb_image}")
            thumb_img = f"{current_user_id}.jpg"
            with open(thumb_img, "wb") as thumb:
                thumb.write(req.content)
            await message.reply_photo(
                photo=thumb_img,
                quote=True,
                caption=text_message,
                reply_markup=reply_markup,
            )
            await i_m_sefg.delete()
        else:
            await i_m_sefg.edit_text(text=text_message, reply_markup=reply_markup)
    else:
        await i_m_sefg.edit_text(
            "**FCUK**! wat have you entered.\n"
            f"API Error: {cf_name}"
        )
196 |
197 |
198 | # playlist
async def g_yt_playlist(client, message):
    """ /pytdl or /gpytdl command: queue a YouTube playlist download
    (the g-variant also uploads to Google Drive). """
    user_command = message.command[0]
    is_cloud = False
    url = None
    if message.reply_to_message:
        url = message.reply_to_message.text
        if user_command == GPYTDL_COMMAND.lower():
            is_cloud = True
    elif len(message.command) == 2:
        url = message.command[1]
        if user_command == GPYTDL_COMMAND.lower():
            is_cloud = True
    else:
        await message.reply_text(" Reply with Youtube Playlist link", quote=True)
        return
    # Guard against a media-only reply (text is None), which would make the
    # substring check below raise TypeError in the original.
    if url and "youtube.com/playlist" in url:
        u_men = message.from_user.mention
        i_m_sefg = await message.reply_text(
            f"Ok Fine 🐈 {u_men} Bro!!:\n Your Request has been ADDED\n\n Please wait until Upload",
            parse_mode="html",
        )
        await yt_playlist_downg(message, i_m_sefg, client, is_cloud)

    else:
        await message.reply_text("YouTube playlist link only 🙄", quote=True)
226 |
227 | #
228 |
229 |
async def g_clonee(client, message):
    """ /gclone command: clone a Google Drive file/folder via rclone.

    Expects a reply whose text is "<gdrive_id> <name>". (Removed the unused
    ``g_id`` local from the original.)
    """
    if message.reply_to_message is not None:
        LOGGER.info(message.reply_to_message.text)
        gclone = CloneHelper(message)
        gclone.config()
        drive_id, drive_name = gclone.get_id()
        LOGGER.info(drive_id)
        LOGGER.info(drive_name)
        await gclone.gcl()
        await gclone.link_gen_size()
    else:
        await message.reply_text(
            "You should reply to a message, which format should be [ID of Gdrive file/folder Name of the file/folder]\nOr read Github for detailled information"
        )
246 |
247 |
async def rename_tg_file(client, message):
    """/rename <new name>: download the replied-to Telegram media, rename
    it, and re-upload it under the new name."""
    usr_id = message.from_user.id
    if not message.reply_to_message:
        await message.reply("Reply with Telegram Media None", quote=True)
        return
    if len(message.command) > 1:
        new_name = (
            str(Path().resolve()) + "/" +
            message.text.split(" ", maxsplit=1)[1].strip()
        )
        file, mess_age = await download_tg(client, message)
        try:
            if file:
                os.rename(file, new_name)
            else:
                return
        except Exception as g_g:
            LOGGER.error(g_g)
            # Report the actual error — the original sent the literal
            # string "g_g" — and stop instead of uploading a file that was
            # never renamed.
            await message.reply_text(str(g_g))
            return
        response = {}
        final_response = await upload_to_tg(
            mess_age, new_name, usr_id, response, client
        )
        LOGGER.info(final_response)
        if not final_response:
            return
        try:
            message_to_send = ""
            for key_f_res_se in final_response:
                local_file_name = key_f_res_se
                message_id = final_response[key_f_res_se]
                channel_id = str(message.chat.id)[4:]
                private_link = f"https://t.me/c/{channel_id}/{message_id}"
                # NOTE(review): the anchor markup here was garbled in the
                # original source; reconstructed as an HTML link to the
                # uploaded message — confirm against upstream.
                message_to_send += "➪ "
                message_to_send += f"<a href='{private_link}'>"
                message_to_send += local_file_name
                message_to_send += "</a>"
                message_to_send += "\n"
            if message_to_send != "":
                mention_req_user = (
                    f"🐈 Hey Bru!! Your Requested Files 👇\n\n"
                )
                message_to_send = mention_req_user + message_to_send
                message_to_send = message_to_send + "\n\n" + " #UPLOADS\n\n💫 Powered By : @TGFilmZone"
            else:
                message_to_send = "FAILED to upload files. 😞😞"
            await message.reply_text(
                text=message_to_send, quote=True, disable_web_page_preview=True
            )
        except Exception as pe:
            LOGGER.info(pe)

    else:
        await message.reply_text(
            " Oops 😬\n\nProvide Name with extension\n\n➩Example: /rename Avengers Endgame.mkv", quote=True
        )
305 |
--------------------------------------------------------------------------------
/tobrot/helper_funcs/download_aria_p_n.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # (c) Shrimadhav U K | gautamajay52 | MaxxRider
4 |
5 | import asyncio
6 | import logging
7 | import os
8 | import sys
9 | import time
10 | import requests
11 | import re
12 | from re import search
13 | import subprocess
14 | import hashlib
15 | import math
16 |
17 | import aria2p
18 | from pyrogram.errors import FloodWait, MessageNotModified
19 | from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, Message
20 | from tobrot import (
21 | ARIA_TWO_STARTED_PORT,
22 | AUTH_CHANNEL,
23 | CUSTOM_FILE_NAME,
24 | DOWNLOAD_LOCATION,
25 | EDIT_SLEEP_TIME_OUT,
26 | LOGGER,
27 | MAX_TIME_TO_WAIT_FOR_TORRENTS_TO_START,
28 | )
29 | from tobrot.helper_funcs.create_compressed_archive import (
30 | create_archive,
31 | get_base_name,
32 | unzip_me,
33 | )
34 | from tobrot.helper_funcs.extract_link_from_message import extract_link
35 | from tobrot.helper_funcs.upload_to_tg import upload_to_gdrive, upload_to_tg
36 | from tobrot.helper_funcs.download import download_tg
37 |
38 | from tobrot.helper_funcs.direct_link_generator import direct_link_generator
39 | from tobrot.helper_funcs.exceptions import DirectDownloadLinkException
40 |
41 | sys.setrecursionlimit(10 ** 4)
42 |
43 |
async def aria_start():
    """Spawn the aria2c RPC daemon and return an aria2p API handle to it."""
    aria2_daemon_start_cmd = [
        "aria2c",
        "--conf-path=/app/tobrot/aria2/aria2.conf",
        "--allow-overwrite=true",
        "--daemon=true",
        # "--dir={DOWNLOAD_LOCATION}" intentionally disabled — see the
        # original TODO: this does not work, needs investigating.
        # but for now, https://t.me/TrollVoiceBot?start=858
        "--enable-rpc",
        "--disk-cache=0",
        "--follow-torrent=mem",
        "--max-connection-per-server=16",
        "--min-split-size=10M",
        "--rpc-listen-all=false",
        f"--rpc-listen-port={ARIA_TWO_STARTED_PORT}",
        "--rpc-max-request-size=1024M",
        "--seed-ratio=0.01",
        "--seed-time=1",
        "--max-overall-upload-limit=2M",
        "--split=16",
        f"--bt-stop-timeout={MAX_TIME_TO_WAIT_FOR_TORRENTS_TO_START}",
    ]
    LOGGER.info(aria2_daemon_start_cmd)
    daemon_proc = await asyncio.create_subprocess_exec(
        *aria2_daemon_start_cmd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    # The daemon forks into the background; wait for the launcher to exit.
    await daemon_proc.communicate()

    return aria2p.API(
        aria2p.Client(
            host="http://localhost", port=ARIA_TWO_STARTED_PORT, secret=""
        )
    )
82 |
83 |
def add_magnet(aria_instance, magnetic_link, c_file_name):
    """Queue a magnet link on aria2; return (ok, gid-or-error-text).

    c_file_name is currently unused: the per-download "dir" option stays
    disabled, exactly as in the original.
    """
    try:
        download = aria_instance.add_magnet(magnetic_link, options=None)
    except Exception as e:
        return (
            False,
            "**FAILED** \n" + str(e) + " \n Your link is Dead 🐈",
        )
    return True, "" + download.gid + ""
99 |
100 |
def add_torrent(aria_instance, torrent_file_path):
    """Queue a local .torrent file on aria2; return (ok, gid-or-error-text)."""
    if torrent_file_path is None:
        # The original referenced an undefined name `e` here, so this
        # branch raised NameError instead of returning the failure tuple.
        return (
            False,
            "**FAILED** \nsomething wrongings when trying to add TORRENT file",
        )
    if os.path.exists(torrent_file_path):
        # Add Torrent Into Queue
        try:
            download = aria_instance.add_torrent(
                torrent_file_path, uris=None, options=None, position=None
            )
        except Exception as e:
            return (
                False,
                "**FAILED** \n"
                + str(e)
                + " \n Your Link is Slow Dude 🐈",
            )
        else:
            return True, "" + download.gid + ""
    else:
        return False, "**FAILED** \nPlease try other sources to get workable link"
126 |
127 |
def add_url(aria_instance, text_url, c_file_name):
    """Queue a plain/direct URL on aria2; return (ok, gid-or-error-text).

    URLs from known file hosters are first resolved to direct links via
    direct_link_generator. c_file_name is currently unused (the per-download
    "dir" option stays disabled).
    """
    options = None
    # Hosters that need a direct-link resolution step before aria2 can
    # download them.
    hosters = (
        "zippyshare.com",
        "osdn.net",
        "mediafire.com",
        "cloud.mail.ru",
        "github.com",
        "yadi.sk",
        "racaty.net",
    )
    uris = [text_url]
    if any(hoster in text_url for hoster in hosters):
        try:
            uris = [direct_link_generator(text_url)]
        except DirectDownloadLinkException as e:
            # Fall back to the raw URL. The original left `uris` unbound on
            # this path, crashing with NameError at add_uris below.
            LOGGER.info(f'{text_url}: {e}')
    # Add URL Into Queue
    try:
        download = aria_instance.add_uris(uris, options=options)
    except Exception as e:
        return (
            False,
            "**FAILED** \n" +
            str(e) + " \nPlease do not send SLOW links. Read /help",
        )
    else:
        return True, "" + download.gid + ""
159 |
160 |
async def call_apropriate_function(
    aria_instance,
    incoming_link,
    c_file_name,
    sent_message_to_update_tg_p,
    is_zip,
    cstom_file_name,
    is_cloud,
    is_unzip,
    is_file,
    user_message,
    client,
):
    """Main download -> (zip / unzip / rename) -> upload pipeline.

    Depending on the flags this either downloads through aria2 (magnet,
    .torrent file, or direct URL) or pulls a Telegram media message
    (``is_file``), then optionally archives (``is_zip``) or extracts
    (``is_unzip``) the result, applies file-name overrides, and finally
    uploads to Google Drive (``is_cloud``) or back to Telegram.

    Returns a ``(bool, message)`` tuple: ``(False, error_text)`` for a
    failure the caller should report, otherwise ``(True, None)``.
    """
    if not is_file:
        # pick the aria2 queueing helper by link type; on success the
        # second tuple element (err_message) actually holds the aria2 GID
        if incoming_link.lower().startswith("magnet:"):
            sagtus, err_message = add_magnet(
                aria_instance, incoming_link, c_file_name)
        elif incoming_link.lower().endswith(".torrent"):
            sagtus, err_message = add_torrent(aria_instance, incoming_link)
        else:
            sagtus, err_message = add_url(
                aria_instance, incoming_link, c_file_name)
        if not sagtus:
            return sagtus, err_message
        LOGGER.info(err_message)
        # poll aria2 until this GID finishes (or fails)
        # https://stackoverflow.com/a/58213653/4723940
        await check_progress_for_dl(
            aria_instance, err_message, sent_message_to_update_tg_p, None
        )
        if incoming_link.startswith("magnet:"):
            # a magnet first downloads only metadata; resolve the GID of
            # the follow-up (real) download and wait for that one too
            err_message = await check_metadata(aria_instance, err_message)
            #
            await asyncio.sleep(1)
            if err_message is not None:
                await check_progress_for_dl(
                    aria_instance, err_message, sent_message_to_update_tg_p, None
                )
            else:
                return False, "can't get metadata \n\n#MetaDataError"
        await asyncio.sleep(1)
        try:
            file = aria_instance.get_download(err_message)
        except aria2p.client.ClientException as ee:
            # GID vanished (e.g. removed/cancelled): treat as handled
            LOGGER.error(ee)
            return True, None
        to_upload_file = file.name
        com_g = file.is_complete
    else:
        # Telegram-media path: replace the progress message and download
        # the replied-to media instead of using aria2
        await sent_message_to_update_tg_p.delete()
        to_upload_file, sent_message_to_update_tg_p = await download_tg(client=client, message=user_message)
        if not to_upload_file:
            return True, None
        com_g = True
    if is_zip:
        # pack the downloaded payload into an archive before uploading
        check_if_file = await create_archive(to_upload_file)
        if check_if_file is not None:
            to_upload_file = check_if_file
    #
    if is_unzip:
        # best-effort extraction; on any error the original file is kept
        try:
            check_ifi_file = get_base_name(to_upload_file)
            await unzip_me(to_upload_file)
            if os.path.exists(check_ifi_file):
                to_upload_file = check_ifi_file
        except Exception as ge:
            LOGGER.info(ge)
            LOGGER.info(
                f"Can't extract {os.path.basename(to_upload_file)}, Uploading the same file"
            )

    if to_upload_file:
        if CUSTOM_FILE_NAME:
            if os.path.isfile(to_upload_file):
                # NOTE(review): the prefix is prepended to the WHOLE path,
                # not the basename — this only looks correct for relative
                # paths; confirm against how DOWNLOAD_LOCATION is used.
                os.rename(to_upload_file,
                          f"{CUSTOM_FILE_NAME}{to_upload_file}")
                to_upload_file = f"{CUSTOM_FILE_NAME}{to_upload_file}"
            else:
                # directory: prefix every file inside it instead
                for root, _, files in os.walk(to_upload_file):
                    LOGGER.info(files)
                    for org in files:
                        p_name = f"{root}/{org}"
                        n_name = f"{root}/{CUSTOM_FILE_NAME}{org}"
                        os.rename(p_name, n_name)
                to_upload_file = to_upload_file  # no-op: directory path unchanged

        if cstom_file_name:
            # per-request rename requested via "link|name" syntax
            os.rename(to_upload_file, cstom_file_name)
            to_upload_file = cstom_file_name
    #
    # collects {base_file_name: message_id} from upload_to_tg
    response = {}
    #LOGGER.info(response)
    user_id = user_message.from_user.id
    if com_g:
        if is_cloud:
            await upload_to_gdrive(
                to_upload_file, sent_message_to_update_tg_p, user_message, user_id
            )
        else:
            final_response = await upload_to_tg(
                sent_message_to_update_tg_p, to_upload_file, user_id, response, client
            )
            if not final_response:
                return True, None
            # build the summary message listing every uploaded file
            try:
                message_to_send = ""
                for key_f_res_se in final_response:
                    local_file_name = key_f_res_se
                    message_id = final_response[key_f_res_se]
                    # channel ids look like -100XXXXXXXX; strip "-100"
                    channel_id = str(sent_message_to_update_tg_p.chat.id)[4:]
                    # NOTE(review): private_link is built but never used below
                    private_link = f"https://t.me/c/{channel_id}/{message_id}"
                    message_to_send += "👉 "
                    message_to_send += local_file_name
                    message_to_send += ""
                    message_to_send += "\n"
                if message_to_send != "":
                    mention_req_user = (
                        f"Your Requested Files\n\n"
                    )
                    message_to_send = mention_req_user + message_to_send
                    message_to_send = message_to_send + "\n\n" + "#uploads"
                else:
                    message_to_send = "FAILED to upload files. 😞😞"
                await user_message.reply_text(
                    text=message_to_send, quote=True, disable_web_page_preview=True
                )
            except Exception as go:
                # summary message is best-effort; never fail the pipeline here
                LOGGER.error(go)
    return True, None
292 |
293 |
294 | #
295 |
296 |
297 | # https://github.com/jaskaranSM/UniBorg/blob/6d35cf452bce1204613929d4da7530058785b6b1/stdplugins/aria.py#L136-L164
298 |
299 | # todo- so much unwanted code, I will remove in future after some testing
async def check_progress_for_dl(aria2, gid, event, previous_message):
    """Poll aria2 for download *gid* until it completes or fails.

    Replies/edits on *event* (a pyrogram message) to report cancellation,
    errors, or completion.  ``previous_message`` is accepted for
    signature compatibility but unused.  Marked "unwanted code" by the
    original author — the branch structure below is preserved as-is.
    """
    while True:
        try:
            file = aria2.get_download(gid)
            complete = file.is_complete
            # NOTE(review): assigned but never read in this function
            is_file = file.seeder
            if not complete:
                if not file.error_message:
                    if file.has_failed:
                        LOGGER.info(
                            f"Cancelling downloading of {file.name} may be due to slow torrent"
                        )
                        await event.reply(
                            f"Download cancelled :\n{file.name}\n\n #MetaDataError", quote=True
                        )
                        # drop the download and its partial files from aria2
                        file.remove(force=True, files=True)
                        return
                    else:
                        # NOTE(review): reached when there is NO error message
                        # and the download has NOT failed, so ``msg`` is empty
                        # here — this ``else`` looks like it was meant to pair
                        # with the outer ``if not file.error_message``; confirm
                        # against observed runtime behavior before changing.
                        msg = file.error_message
                        LOGGER.info(msg)
                        await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
                        await event.reply(f"`{msg}`")
                        return
                # still downloading: wait and poll again
                await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
                # await check_progress_for_dl(aria2, gid, event, previous_message)
            else:
                LOGGER.info(
                    f"Downloaded Successfully: `{file.name} ({file.total_length_string()})` 🤒"
                )
                # await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
                # metadata-only downloads get no "Downloaded" status edit
                if not file.is_metadata:
                    await event.edit(
                        f"**Status:** `Downloaded ✅`\n\n**📝 FileName:** `{file.name}`\n\n**📎 Total Size:** `({file.total_length_string()})` \n\n#Downloaded"
                    )
                return
        except aria2p.client.ClientException:
            # GID disappeared from aria2 (cancelled/removed externally)
            await event.reply(
                f"Download cancelled :\n{file.name} ({file.total_length_string()})", quote=True
            )
            return
        except MessageNotModified as ep:
            LOGGER.info(ep)
            await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
            # await check_progress_for_dl(aria2, gid, event, previous_message)
            return
        except FloodWait as e:
            # Telegram rate limit: back off for the requested duration
            LOGGER.info(e)
            time.sleep(e.x)
        except Exception as e:
            LOGGER.info(str(e))
            if "not found" in str(e) or "'file'" in str(e):
                await event.edit(
                    f"Download cancelled :\n{file.name} ({file.total_length_string()})"
                )
                return
            else:
                LOGGER.info(str(e))
                await event.edit(
                    "error :\n{} \n\n#error".format(str(e))
                )
                return
361 |
362 |
363 | # https://github.com/jaskaranSM/UniBorg/blob/6d35cf452bce1204613929d4da7530058785b6b1/stdplugins/aria.py#L136-L164
364 |
365 |
async def check_metadata(aria2, gid):
    """Resolve the real download GID after a magnet's metadata finishes.

    aria2 spawns a follow-up download once a metadata-only magnet
    completes; return that download's GID, or ``None`` when no
    follow-up exists (metadata could not be resolved).
    """
    meta_download = aria2.get_download(gid)

    followers = meta_download.followed_by_ids
    if not followers:
        # no follow-up download was created
        # https://t.me/c/1213160642/496
        return None
    new_gid = followers[0]
    LOGGER.info(f"Changing GID {gid} to {new_gid}")
    return new_gid
375 |
--------------------------------------------------------------------------------
/tobrot/helper_funcs/upload_to_tg.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | # (c) Shrimadhav U K | gautamajay52
4 |
5 | import asyncio
6 | import logging
7 | import os
8 | import re
9 | import shutil
10 | import subprocess
11 | import time
12 | from functools import partial
13 | from pathlib import Path
14 |
15 | import pyrogram.types as pyrogram
16 | import requests
17 | from hachoir.metadata import extractMetadata
18 | from hachoir.parser import createParser
19 | from hurry.filesize import size
20 | from PIL import Image
21 | from pyrogram.errors import FloodWait, MessageNotModified
22 | from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, Message
23 | from pyrogram.types import InputMediaAudio, InputMediaDocument, InputMediaVideo
24 | from requests.utils import requote_uri
25 | from tobrot import (
26 | DESTINATION_FOLDER,
27 | DOWNLOAD_LOCATION,
28 | EDIT_SLEEP_TIME_OUT,
29 | INDEX_LINK,
30 | LOGGER,
31 | RCLONE_CONFIG,
32 | TG_MAX_FILE_SIZE,
33 | UPLOAD_AS_DOC,
34 | gDict,
35 | user_specific_config,
36 | )
37 | from tobrot.helper_funcs.copy_similar_file import copy_file
38 | from tobrot.helper_funcs.display_progress import humanbytes, Progress
39 | from tobrot.helper_funcs.help_Nekmo_ffmpeg import take_screen_shot
40 | from tobrot.helper_funcs.split_large_files import split_large_files
41 |
42 | # stackoverflow🤐
def getFolderSize(p):
    """Return the total size in bytes of all regular files under *p*, recursively."""
    total_bytes = 0
    for entry in os.listdir(p):
        full_path = os.path.join(p, entry)
        if os.path.isfile(full_path):
            total_bytes += os.path.getsize(full_path)
        else:
            # sub-directory: descend into it
            total_bytes += getFolderSize(full_path)
    return total_bytes
51 |
52 |
async def upload_to_tg(
    message,
    local_file_name,
    from_user,
    dict_contatining_uploaded_files,
    client,
    edit_media=False,
    yt_thumb=None,
):
    """Upload *local_file_name* (a file or a directory) to Telegram.

    Directories are walked recursively; a file larger than
    ``TG_MAX_FILE_SIZE`` is split first and the pieces uploaded one by
    one.  ``dict_contatining_uploaded_files`` is mutated in place to map
    each uploaded base name to its Telegram message id, and is returned.
    Returns ``None`` when a single-file upload fails.
    """
    base_file_name = os.path.basename(local_file_name)
    # caption is just the bare file name
    # (FIX: collapsed a chain of pointless `caption_str += ""` appends)
    caption_str = base_file_name
    if os.path.isdir(local_file_name):
        directory_contents = os.listdir(local_file_name)
        directory_contents.sort()
        LOGGER.info(directory_contents)
        new_m_esg = message
        if not message.photo:
            # announce how many files were found before uploading them
            new_m_esg = await message.reply_text(
                f"Found {len(directory_contents)} Files 📡",
                quote=True
            )
        for single_file in directory_contents:
            # recursion: each entry may itself be a directory
            await upload_to_tg(
                new_m_esg,
                os.path.join(local_file_name, single_file),
                from_user,
                dict_contatining_uploaded_files,
                client,
                edit_media,
                yt_thumb,
            )
    else:
        if os.path.getsize(local_file_name) > TG_MAX_FILE_SIZE:
            LOGGER.info("TODO")
            # too large for a single Telegram upload: split, then recurse
            d_f_s = humanbytes(os.path.getsize(local_file_name))
            i_m_s_g = await message.reply_text(
                "Telegram does not support uploading this file.\n"
                f"Detected File Size: {d_f_s} 😡\n"
                "\n🤖 trying to split the files 🌝🌝🌚"
            )
            splitted_dir = await split_large_files(local_file_name)
            totlaa_sleif = os.listdir(splitted_dir)
            totlaa_sleif.sort()
            number_of_files = len(totlaa_sleif)
            LOGGER.info(totlaa_sleif)
            ba_se_file_name = os.path.basename(local_file_name)
            await i_m_s_g.edit_text(
                f"Detected File Size: {d_f_s} 😡\n"
                f"{ba_se_file_name} splitted into {number_of_files} files.\n"
                "Trying to upload to Telegram, now ..."
            )
            for le_file in totlaa_sleif:
                # recursion: each split piece is below the size limit
                await upload_to_tg(
                    message,
                    os.path.join(splitted_dir, le_file),
                    from_user,
                    dict_contatining_uploaded_files,
                    client,
                    edit_media,
                    yt_thumb,
                )
        else:
            # fits in one Telegram upload
            # (FIX: removed unused local `sizze`)
            sent_message = await upload_single_file(
                message,
                local_file_name,
                caption_str,
                from_user,
                client,
                edit_media,
                yt_thumb,
            )
            if sent_message is not None:
                dict_contatining_uploaded_files[
                    os.path.basename(local_file_name)
                ] = sent_message.message_id
            else:
                # upload failed: propagate None so the caller can detect it
                return
    return dict_contatining_uploaded_files
140 |
141 |
142 | # © gautamajay52 thanks to Rclone team for this wonderful tool.🧘
143 |
144 |
async def upload_to_gdrive(file_upload, message, messa_ge, g_id):
    """Upload *file_upload* (file or directory) to cloud storage via rclone.

    Writes ``rclone.conf`` from the configured RCLONE_CONFIG if missing,
    runs ``rclone copy`` into ``DESTINATION_FOLDER``, then uses
    ``rclone lsf`` with a filter file to recover the remote id and build
    a Drive link (plus an INDEX_LINK button when configured).  Replies
    on *messa_ge* with the result and deletes the local payload.
    ``g_id`` is accepted for signature compatibility but unused here.
    """
    await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
    del_it = await message.edit_text(
        f"🔊 Now Uploading to ☁️ Cloud!!!"
    )
    # materialize the rclone config from the env-provided string once
    if not os.path.exists("rclone.conf"):
        with open("rclone.conf", "w+", newline="\n", encoding="utf-8") as fole:
            fole.write(f"{RCLONE_CONFIG}")
    if os.path.exists("rclone.conf"):
        with open("rclone.conf", "r+") as file:
            con = file.read()
            # first "[section]" header in the config = remote name
            gUP = re.findall("\[(.*)\]", con)[0]
            LOGGER.info(gUP)
    destination = f"{DESTINATION_FOLDER}"
    file_upload = str(Path(file_upload).resolve())
    LOGGER.info(file_upload)
    if os.path.isfile(file_upload):
        # ---- single file: copy it into the destination folder ----
        g_au = [
            "rclone",
            "copy",
            "--config=rclone.conf",
            f"{file_upload}",
            f"{gUP}:{destination}",
            "-v",
        ]
        LOGGER.info(g_au)
        tmp = await asyncio.create_subprocess_exec(
            *g_au, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
        )
        pro, cess = await tmp.communicate()
        LOGGER.info(pro.decode("utf-8"))
        LOGGER.info(cess.decode("utf-8"))
        gk_file = re.escape(os.path.basename(file_upload))
        LOGGER.info(gk_file)
        # filter file: include only the uploaded name, exclude the rest
        # NOTE(review): the with-target shadows the builtin ``filter``
        with open("filter.txt", "w+", encoding="utf-8") as filter:
            print(f"+ {gk_file}\n- *", file=filter)

        # "lsf -F i" lists only the remote id of the matching file
        t_a_m = [
            "rclone",
            "lsf",
            "--config=rclone.conf",
            "-F",
            "i",
            "--filter-from=filter.txt",
            "--files-only",
            f"{gUP}:{destination}",
        ]
        gau_tam = await asyncio.create_subprocess_exec(
            *t_a_m, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
        )
        # os.remove("filter.txt")
        gau, tam = await gau_tam.communicate()
        gautam = gau.decode().strip()
        LOGGER.info(gau.decode())
        LOGGER.info(tam.decode())
        # os.remove("filter.txt")
        gauti = f"https://drive.google.com/file/d/{gautam}/view?usp=drivesdk"
        gjay = size(os.path.getsize(file_upload))
        button = []
        button.append(
            [pyrogram.InlineKeyboardButton(text="☁️ CloudUrl ☁️", url=f"{gauti}")]
        )
        if INDEX_LINK:
            indexurl = f"{INDEX_LINK}/{os.path.basename(file_upload)}"
            tam_link = requests.utils.requote_uri(indexurl)
            LOGGER.info(tam_link)
            button.append(
                [
                    pyrogram.InlineKeyboardButton(
                        text="ℹ️ IndexUrl ℹ️", url=f"{tam_link}"
                    )
                ]
            )
        button_markup = pyrogram.InlineKeyboardMarkup(button)
        await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
        await messa_ge.reply_text(
            f"🤖: Uploaded successfully `{os.path.basename(file_upload)}` 🤒\n📀 Size: {gjay}",
            reply_markup=button_markup,
        )
        # local copy no longer needed after a successful cloud upload
        os.remove(file_upload)
        await del_it.delete()
    else:
        # ---- directory: copy it as a folder of the same name ----
        tt = os.path.join(destination, os.path.basename(file_upload))
        LOGGER.info(tt)
        t_am = [
            "rclone",
            "copy",
            "--config=rclone.conf",
            f"{file_upload}",
            f"{gUP}:{tt}",
            "-v",
        ]
        LOGGER.info(t_am)
        tmp = await asyncio.create_subprocess_exec(
            *t_am, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
        )
        pro, cess = await tmp.communicate()
        LOGGER.info(pro.decode("utf-8"))
        LOGGER.info(cess.decode("utf-8"))
        g_file = re.escape(os.path.basename(file_upload))
        LOGGER.info(g_file)
        # directory filter: note the trailing "/" on the include rule
        with open("filter1.txt", "w+", encoding="utf-8") as filter1:
            print(f"+ {g_file}/\n- *", file=filter1)

        g_a_u = [
            "rclone",
            "lsf",
            "--config=rclone.conf",
            "-F",
            "i",
            "--filter-from=filter1.txt",
            "--dirs-only",
            f"{gUP}:{destination}",
        ]
        gau_tam = await asyncio.create_subprocess_exec(
            *g_a_u, stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE
        )
        # os.remove("filter1.txt")
        gau, tam = await gau_tam.communicate()
        # NOTE(review): unlike the file branch, this id is not .strip()ped,
        # so a trailing newline may end up inside the folder URL — confirm.
        gautam = gau.decode("utf-8")
        LOGGER.info(gautam)
        LOGGER.info(tam.decode("utf-8"))
        # os.remove("filter1.txt")
        gautii = f"https://drive.google.com/folderview?id={gautam}"
        gjay = size(getFolderSize(file_upload))
        LOGGER.info(gjay)
        button = []
        button.append(
            [pyrogram.InlineKeyboardButton(text="☁️ CloudUrl ☁️", url=f"{gautii}")]
        )
        if INDEX_LINK:
            indexurl = f"{INDEX_LINK}/{os.path.basename(file_upload)}/"
            tam_link = requests.utils.requote_uri(indexurl)
            LOGGER.info(tam_link)
            button.append(
                [
                    pyrogram.InlineKeyboardButton(
                        text="ℹ️ IndexUrl ℹ️", url=f"{tam_link}"
                    )
                ]
            )
        button_markup = pyrogram.InlineKeyboardMarkup(button)
        await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
        await messa_ge.reply_text(
            f"🤖: Uploaded successfully `{os.path.basename(file_upload)}` 🤒\n📀 Size: {gjay}",
            reply_markup=button_markup,
        )
        shutil.rmtree(file_upload)
        await del_it.delete()
294 |
295 |
296 |
297 |
298 |
async def upload_single_file(
    message, local_file_name, caption_str, from_user, client, edit_media, yt_thumb
):
    """Upload one local file to Telegram as document, video, or audio.

    The media kind is chosen from the file extension unless the global
    ``UPLOAD_AS_DOC`` or the user's dynamic config forces document mode.
    A per-user thumbnail (``DOWNLOAD_LOCATION/thumbnails/<user>.jpg``)
    is used when present; for videos a screenshot or *yt_thumb* is used
    as fallback.  Deletes the local file after a successful upload and
    returns the sent pyrogram message, or ``None`` on failure.
    """
    await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
    local_file_name = str(Path(local_file_name).resolve())
    sent_message = None
    start_time = time.time()
    #
    thumbnail_location = os.path.join(
        DOWNLOAD_LOCATION, "thumbnails", str(from_user) + ".jpg"
    )
    # LOGGER.info(thumbnail_location)
    # per-user override of the global "upload as document" setting
    dyna_user_config_upload_as_doc = False
    for key in iter(user_specific_config):
        if key == from_user:
            dyna_user_config_upload_as_doc=user_specific_config[key].upload_as_doc
            LOGGER.info(f'Found dyanamic config for user {from_user}')
    #
    if UPLOAD_AS_DOC.upper() == "TRUE" or dyna_user_config_upload_as_doc:
        # ---- forced document upload, regardless of file type ----
        # todo
        thumb = None
        thumb_image_path = None
        if os.path.exists(thumbnail_location):
            # copy the thumbnail next to the payload before uploading
            thumb_image_path = await copy_file(
                thumbnail_location, os.path.dirname(os.path.abspath(local_file_name))
            )
            thumb = thumb_image_path
        message_for_progress_display = message
        if not edit_media:
            message_for_progress_display = await message.reply_text(
                "**Status :** `Starting Uploading 📤`\n\n**• FileName :** `{}`".format(os.path.basename(local_file_name))
            )
        prog = Progress(from_user, client, message_for_progress_display)
        sent_message = await message.reply_document(
            document=local_file_name,
            thumb=thumb,
            caption=caption_str,
            parse_mode="html",
            disable_notification=True,
            progress=prog.progress_for_pyrogram,
            progress_args=(
                f"**• Uploading :** `{os.path.basename(local_file_name)}`",
                start_time,
            ),
        )
        # remove the separate progress message if we created one
        if message.message_id != message_for_progress_display.message_id:
            try:
                await message_for_progress_display.delete()
            except FloodWait as gf:
                time.sleep(gf.x)
            except Exception as rr:
                LOGGER.warning(str(rr))
        os.remove(local_file_name)
        if thumb is not None:
            os.remove(thumb)
    else:
        # ---- media-type-aware upload (video / audio / document) ----
        try:
            message_for_progress_display = message
            if not edit_media:
                message_for_progress_display = await message.reply_text(
                    "**Status :** `Starting Uploading 📤`\n\n**• FileName :** `{}`".format(os.path.basename(local_file_name))
                )
            prog = Progress(from_user, client, message_for_progress_display)
            if local_file_name.upper().endswith(("MKV", "MP4", "WEBM", "FLV", "3GP", "AVI", "MOV", "OGG", "WMV", "M4V", "TS", "MPG", "MTS", "M2TS")):
                # video: probe duration, pick/create a thumbnail
                duration = 0
                try:
                    metadata = extractMetadata(createParser(local_file_name))
                    if metadata.has("duration"):
                        duration = metadata.get("duration").seconds
                except Exception as g_e:
                    LOGGER.info(g_e)
                width = 0
                height = 0
                thumb_image_path = None
                if os.path.exists(thumbnail_location):
                    thumb_image_path = await copy_file(
                        thumbnail_location,
                        os.path.dirname(os.path.abspath(local_file_name)),
                    )
                else:
                    if not yt_thumb:
                        # no user thumbnail: grab a frame from the video middle
                        LOGGER.info("Taking Screenshot..")
                        thumb_image_path = await take_screen_shot(
                            local_file_name,
                            os.path.dirname(os.path.abspath(local_file_name)),
                            (duration / 2),
                        )
                    else:
                        # youtube-provided thumbnail URL: download and re-encode
                        req = requests.get(yt_thumb)
                        thumb_image_path = os.path.join(
                            os.path.dirname(os.path.abspath(local_file_name)),
                            str(time.time()) + ".jpg",
                        )
                        with open(thumb_image_path, "wb") as thum:
                            thum.write(req.content)
                        img = Image.open(thumb_image_path).convert("RGB")
                        img.save(thumb_image_path, format="jpeg")
                # get the correct width, height, and duration for videos greater than 10MB
                if os.path.exists(thumb_image_path):
                    metadata = extractMetadata(createParser(thumb_image_path))
                    if metadata.has("width"):
                        width = metadata.get("width")
                    if metadata.has("height"):
                        height = metadata.get("height")
                    # ref: https://t.me/PyrogramChat/44663
                    # https://stackoverflow.com/a/21669827/4723940
                    Image.open(thumb_image_path).convert("RGB").save(
                        thumb_image_path
                    )
                    img = Image.open(thumb_image_path)
                    # https://stackoverflow.com/a/37631799/4723940
                    # NOTE(review): Image.resize returns a NEW image; the result
                    # is discarded here, so the saved thumbnail is unresized —
                    # likely `img = img.resize(...)` was intended. Confirm.
                    img.resize((320, height))
                    img.save(thumb_image_path, "JPEG")
                    # https://pillow.readthedocs.io/en/3.1.x/reference/Image.html#create-thumbnails
                #
                thumb = None
                if thumb_image_path is not None and os.path.isfile(thumb_image_path):
                    thumb = thumb_image_path
                # send video
                if edit_media and message.photo:
                    await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
                    sent_message = await message.edit_media(
                        media=InputMediaVideo(
                            media=local_file_name,
                            thumb=thumb,
                            caption=caption_str,
                            parse_mode="html",
                            width=width,
                            height=height,
                            duration=duration,
                            supports_streaming=True,
                        )
                        # quote=True,
                    )
                else:
                    sent_message = await message.reply_video(
                        video=local_file_name,
                        caption=caption_str,
                        parse_mode="html",
                        duration=duration,
                        width=width,
                        height=height,
                        thumb=thumb,
                        supports_streaming=True,
                        disable_notification=True,
                        progress=prog.progress_for_pyrogram,
                        progress_args=(
                            f"**• Uploading :** `{os.path.basename(local_file_name)}`",
                            start_time,
                        ),
                    )
                if thumb is not None:
                    os.remove(thumb)
            elif local_file_name.upper().endswith(("MP3", "M4A", "M4B", "FLAC", "WAV")):
                # audio: probe duration/title/artist tags for the Telegram card
                metadata = extractMetadata(createParser(local_file_name))
                duration = 0
                title = ""
                artist = ""
                if metadata.has("duration"):
                    duration = metadata.get("duration").seconds
                if metadata.has("title"):
                    title = metadata.get("title")
                if metadata.has("artist"):
                    artist = metadata.get("artist")
                thumb_image_path = None
                if os.path.isfile(thumbnail_location):
                    thumb_image_path = await copy_file(
                        thumbnail_location,
                        os.path.dirname(os.path.abspath(local_file_name)),
                    )
                thumb = None
                if thumb_image_path is not None and os.path.isfile(thumb_image_path):
                    thumb = thumb_image_path
                # send audio
                if edit_media and message.photo:
                    await asyncio.sleep(EDIT_SLEEP_TIME_OUT)
                    sent_message = await message.edit_media(
                        media=InputMediaAudio(
                            media=local_file_name,
                            thumb=thumb,
                            caption=caption_str,
                            parse_mode="html",
                            duration=duration,
                            performer=artist,
                            title=title,
                        )
                    )
                else:
                    sent_message = await message.reply_audio(
                        audio=local_file_name,
                        caption=caption_str,
                        parse_mode="html",
                        duration=duration,
                        performer=artist,
                        title=title,
                        thumb=thumb,
                        disable_notification=True,
                        progress=prog.progress_for_pyrogram,
                        progress_args=(
                            f"**• Uploading :** `{os.path.basename(local_file_name)}`",
                            start_time,
                        ),
                    )
                if thumb is not None:
                    os.remove(thumb)
            else:
                # any other extension: plain document upload
                thumb_image_path = None
                if os.path.isfile(thumbnail_location):
                    thumb_image_path = await copy_file(
                        thumbnail_location,
                        os.path.dirname(os.path.abspath(local_file_name)),
                    )
                # if a file, don't upload "thumb"
                # this "diff" is a major derp -_- 😔😭😭
                thumb = None
                if thumb_image_path is not None and os.path.isfile(thumb_image_path):
                    thumb = thumb_image_path
                #
                # send document
                if edit_media and message.photo:
                    sent_message = await message.edit_media(
                        media=InputMediaDocument(
                            media=local_file_name,
                            thumb=thumb,
                            caption=caption_str,
                            parse_mode="html",
                        )
                    )
                else:
                    sent_message = await message.reply_document(
                        document=local_file_name,
                        thumb=thumb,
                        caption=caption_str,
                        parse_mode="html",
                        disable_notification=True,
                        progress=prog.progress_for_pyrogram,
                        progress_args=(
                            f"**• Uploading :** `{os.path.basename(local_file_name)}`",
                            start_time,
                        ),
                    )
                if thumb is not None:
                    os.remove(thumb)

        except MessageNotModified as oY:
            LOGGER.info(oY)
        except FloodWait as g:
            # Telegram rate limit: back off for the requested duration
            LOGGER.info(g)
            time.sleep(g.x)
        except Exception as e:
            LOGGER.info(e)
            await message_for_progress_display.edit_text("**FAILED**\n" + str(e))
        else:
            # success: clean up progress message and the local file
            if message.message_id != message_for_progress_display.message_id:
                try:
                    if sent_message is not None:
                        await message_for_progress_display.delete()
                except FloodWait as gf:
                    time.sleep(gf.x)
                except Exception as rr:
                    LOGGER.warning(str(rr))
            await asyncio.sleep(5)
            os.remove(local_file_name)
    return sent_message
563 |
--------------------------------------------------------------------------------
/COPYING:
--------------------------------------------------------------------------------
1 | GNU AFFERO GENERAL PUBLIC LICENSE
2 | Version 3, 19 November 2007
3 |
4 | Copyright (C) 2007 Free Software Foundation, Inc.