├── web
│ ├── __init__.py
│ └── nodes.py
├── bot
│ ├── helper
│ │ ├── __init__.py
│ │ ├── ext_utils
│ │ │ ├── __init__.py
│ │ │ ├── exceptions.py
│ │ │ ├── bulk_links.py
│ │ │ ├── telegraph_helper.py
│ │ │ ├── task_manager.py
│ │ │ ├── fs_utils.py
│ │ │ ├── db_handler.py
│ │ │ └── leech_utils.py
│ │ ├── listeners
│ │ │ ├── __init__.py
│ │ │ ├── qbit_listener.py
│ │ │ └── aria2_listener.py
│ │ ├── mirror_utils
│ │ │ ├── __init__.py
│ │ │ ├── gdrive_utlis
│ │ │ │ ├── __init__.py
│ │ │ │ ├── delete.py
│ │ │ │ ├── count.py
│ │ │ │ ├── clone.py
│ │ │ │ ├── download.py
│ │ │ │ ├── search.py
│ │ │ │ ├── helper.py
│ │ │ │ └── upload.py
│ │ │ ├── download_utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── gd_download.py
│ │ │ │ ├── rclone_download.py
│ │ │ │ ├── aria2_download.py
│ │ │ │ ├── direct_link_generator_license.md
│ │ │ │ ├── switch_download.py
│ │ │ │ ├── qbit_download.py
│ │ │ │ └── mega_download.py
│ │ │ ├── rclone_utils
│ │ │ │ ├── __init__.py
│ │ │ │ └── serve.py
│ │ │ ├── status_utils
│ │ │ │ ├── __init__.py
│ │ │ │ ├── rclone_status.py
│ │ │ │ ├── split_status.py
│ │ │ │ ├── queue_status.py
│ │ │ │ ├── mega_download_status.py
│ │ │ │ ├── switch_status.py
│ │ │ │ ├── gdrive_status.py
│ │ │ │ ├── yt_dlp_download_status.py
│ │ │ │ ├── zip_status.py
│ │ │ │ ├── extract_status.py
│ │ │ │ ├── aria2_status.py
│ │ │ │ └── qbit_status.py
│ │ │ └── upload_utils
│ │ │     └── __init__.py
│ │ └── switch_helper
│ │     ├── __init__.py
│ │     ├── filters.py
│ │     ├── bot_commands.py
│ │     ├── button_build.py
│ │     └── message_utils.py
│ └── modules
│     ├── __init__.py
│     ├── gd_delete.py
│     ├── shell.py
│     ├── gd_count.py
│     ├── status.py
│     ├── gd_search.py
│     ├── eval.py
│     ├── authorize.py
│     ├── cancel_mirror.py
│     └── torrent_select.py
├── _config.yml
├── start.sh
├── captain-definition
├── requirements-cli.txt
├── docker-compose.yml
├── Dockerfile
├── .gitignore
├── generate_string_session.py
├── requirements.txt
├── aria.sh
├── generate_drive_token.py
├── qBittorrent
│ └── config
│     └── qBittorrent.conf
├── driveid.py
├── update.py
├── add_to_team_drive.py
└── config_sample.env
/web/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/bot/helper/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/bot/modules/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/bot/helper/ext_utils/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/bot/helper/listeners/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-time-machine
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/bot/helper/switch_helper/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/gdrive_utlis/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/start.sh:
--------------------------------------------------------------------------------
1 | python3 update.py && python3 -m bot
2 |
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/download_utils/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/rclone_utils/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/status_utils/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/upload_utils/__init__.py:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/captain-definition:
--------------------------------------------------------------------------------
1 | {
2 | "schemaVersion": 2,
3 | "dockerfilePath": "./Dockerfile"
4 | }
5 |
--------------------------------------------------------------------------------
/requirements-cli.txt:
--------------------------------------------------------------------------------
1 | oauth2client
2 | google-api-python-client
3 | progress
4 | progressbar2
5 | httplib2shim
6 | google_auth_oauthlib
7 | pyrogram>=2
8 |
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "3.3"
2 |
3 | services:
4 | app:
5 | build: .
6 | command: bash start.sh
7 | restart: on-failure
8 | ports:
9 | - "80:80"
10 | - "8080:8080"
11 |
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM anasty17/mltb:latest
2 |
3 | WORKDIR /usr/src/app
4 | RUN chmod 777 /usr/src/app
5 |
6 | COPY requirements.txt .
7 | RUN pip3 install --no-cache-dir -r requirements.txt
8 |
9 | COPY . .
10 |
11 | CMD ["bash", "start.sh"]
12 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | config.env
2 | *.pyc
3 | data*
4 | .vscode
5 | .idea
6 | *.json
7 | *.pickle
8 | .netrc
9 | log.txt
10 | accounts/*
11 | Thumbnails/*
12 | rclone/*
13 | list_drives.txt
14 | cookies.txt
15 | downloads
16 | terabox.txt
17 | rclone.conf
18 |
--------------------------------------------------------------------------------
/generate_string_session.py:
--------------------------------------------------------------------------------
1 | try:
2 | from pyrogram import Client
3 | except Exception as e:
4 | print(e)
5 | print('\nInstall pyrogram: pip3 install pyrogram')
6 | exit(1)
7 |
 8 | print('Pyrogram v2 or greater is required.')
9 | API_KEY = int(input("Enter API KEY: "))
10 | API_HASH = input("Enter API HASH: ")
11 | with Client(name='USS', api_id=API_KEY, api_hash=API_HASH, in_memory=True) as app:
12 | print(app.export_session_string())
13 |
--------------------------------------------------------------------------------
/bot/helper/ext_utils/exceptions.py:
--------------------------------------------------------------------------------
1 | class DirectDownloadLinkException(Exception):
2 | """Not method found for extracting direct download link from the http link"""
3 | pass
4 |
5 |
6 | class NotSupportedExtractionArchive(Exception):
7 | """The archive format use is trying to extract is not supported"""
8 | pass
9 |
10 |
11 | class RssShutdownException(Exception):
12 | """This exception should be raised when shutdown is called to stop the montior"""
13 | pass
14 |
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | aiohttp
2 | aiofiles
3 | aioshutil
4 | anytree
5 | apscheduler
6 | aria2p
7 | asyncio
8 | beautifulsoup4
9 | cloudscraper
10 | dnspython
11 | feedparser
12 | flask
13 | gevent
14 | google-api-python-client
15 | google-auth-httplib2
16 | google-auth-oauthlib
17 | gunicorn
18 | git+https://github.com/zevtyardt/lk21.git
19 | httpx
20 | lxml
21 | motor
22 | mutagen
23 | natsort
24 | pillow
25 | psutil
26 | pybase64
27 | pymongo
28 | python-dotenv
29 | python-magic
30 | qbittorrent-api
31 | requests
32 | swibots
33 | telegraph
34 | tenacity
35 | uvloop
36 | xattr
37 | yt-dlp
38 |
--------------------------------------------------------------------------------
/aria.sh:
--------------------------------------------------------------------------------
1 | tracker_list=$(curl -Ns https://ngosang.github.io/trackerslist/trackers_all_http.txt | awk '$0' | tr '\n\n' ',')
2 | aria2c --allow-overwrite=true --auto-file-renaming=true --bt-enable-lpd=true --bt-detach-seed-only=true \
3 | --bt-remove-unselected-file=true --bt-tracker="[$tracker_list]" --bt-max-peers=0 --enable-rpc=true \
4 | --rpc-max-request-size=1024M --max-connection-per-server=10 --max-concurrent-downloads=10 --split=10 \
5 | --seed-ratio=0 --check-integrity=true --continue=true --daemon=true --disk-cache=40M --force-save=true \
6 | --min-split-size=10M --follow-torrent=mem --check-certificate=false --optimize-concurrent-downloads=true \
7 | --http-accept-gzip=true --max-file-not-found=0 --max-tries=20 --peer-id-prefix=-qB4520- --reuse-uri=true \
8 | --content-disposition-default-utf8=true --user-agent=Wget/1.12 --peer-agent=qBittorrent/4.5.2 --quiet=true \
9 | --summary-interval=0 --max-upload-limit=1K
--------------------------------------------------------------------------------
/generate_drive_token.py:
--------------------------------------------------------------------------------
1 | import pickle
2 | import os
3 | from google_auth_oauthlib.flow import InstalledAppFlow
4 | from google.auth.transport.requests import Request
5 |
6 | credentials = None
7 | __G_DRIVE_TOKEN_FILE = "token.pickle"
8 | __OAUTH_SCOPE = ["https://www.googleapis.com/auth/drive"]
9 | if os.path.exists(__G_DRIVE_TOKEN_FILE):
10 | with open(__G_DRIVE_TOKEN_FILE, 'rb') as f:
11 | credentials = pickle.load(f)
12 | if credentials is None or not credentials.valid:
13 |     if credentials and credentials.expired and credentials.refresh_token:
14 |         credentials.refresh(Request())
15 |     else:
16 |         flow = InstalledAppFlow.from_client_secrets_file(
17 |             'credentials.json', __OAUTH_SCOPE)
18 |         credentials = flow.run_local_server(port=0, open_browser=False)
19 | 
20 |     # Save the credentials for the next run
21 |     with open(__G_DRIVE_TOKEN_FILE, 'wb') as token:
22 |         pickle.dump(credentials, token)
27 |
--------------------------------------------------------------------------------
/bot/helper/switch_helper/filters.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from swibots import filters
3 |
4 | from bot import user_data, OWNER_ID, LOGGER
5 |
6 |
7 | class CustomFilters:
8 |
9 | async def owner_filter(self, ctx):
10 | return ctx.event.action_by_id == OWNER_ID
11 |
12 | owner = filters.create(owner_filter)
13 |
14 | async def authorized_user(self, ctx):
15 | uid = ctx.event.action_by_id
16 | chat_id = ctx.event.community_id
17 | return bool(uid == OWNER_ID or (uid in user_data and (user_data[uid].get('is_auth', False) or
18 | user_data[uid].get('is_sudo', False))) or (chat_id in user_data and user_data[chat_id].get('is_auth', False)))
19 |
20 | authorized = filters.create(authorized_user)
21 |
22 | async def sudo_user(self, ctx):
23 | uid = ctx.event.action_by_id
24 | return bool(uid == OWNER_ID or uid in user_data and user_data[uid].get('is_sudo'))
25 |
26 | sudo = filters.create(sudo_user)
27 |
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/status_utils/rclone_status.py:
--------------------------------------------------------------------------------
1 | from bot.helper.ext_utils.bot_utils import MirrorStatus
2 |
3 |
4 | class RcloneStatus:
5 | def __init__(self, obj, message, gid, status):
6 | self.__obj = obj
7 | self.__gid = gid
8 | self.__status = status
9 | self.message = message
10 |
11 | def gid(self):
12 | return self.__gid
13 |
14 | def progress(self):
15 | return self.__obj.percentage
16 |
17 | def speed(self):
18 | return self.__obj.speed
19 |
20 | def name(self):
21 | return self.__obj.name
22 |
23 | def size(self):
24 | return self.__obj.size
25 |
26 | def eta(self):
27 | return self.__obj.eta
28 |
29 | def status(self):
30 | if self.__status == 'dl':
31 | return MirrorStatus.STATUS_DOWNLOADING
32 | elif self.__status == 'up':
33 | return MirrorStatus.STATUS_UPLOADING
34 | else:
35 | return MirrorStatus.STATUS_CLONING
36 |
37 | def processed_bytes(self):
38 | return self.__obj.transferred_size
39 |
40 | def download(self):
41 | return self.__obj
42 |
--------------------------------------------------------------------------------
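Note: RcloneStatus above, and every other class in status_utils/ (split, queue, mega, switch, gdrive, yt-dlp, zip, extract, aria2, qbit), implements the same informal, duck-typed interface; the status display code only ever calls these methods. A minimal sketch of that implied contract (not a file in this repo; the comments are inferences from the classes themselves):

    class TaskStatus:
        def gid(self): ...              # unique id used to track/cancel the task
        def name(self): ...             # display name of the task
        def size(self): ...             # human-readable total size
        def progress(self): ...         # percentage string, e.g. '42.5%'
        def speed(self): ...            # human-readable rate, e.g. '1.2 MiB/s'
        def eta(self): ...              # human-readable time remaining, or '-'
        def status(self): ...           # one of the MirrorStatus.* constants
        def processed_bytes(self): ...  # amount completed so far
        def download(self): ...         # underlying helper object with cancel logic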
/bot/modules/gd_delete.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from swibots import CommandHandler
3 |
4 | from bot import bot, LOGGER
5 | from bot.helper.switch_helper.message_utils import auto_delete_message, sendMessage
6 | from bot.helper.switch_helper.filters import CustomFilters
7 | from bot.helper.switch_helper.bot_commands import BotCommands
8 | from bot.helper.mirror_utils.gdrive_utlis.delete import gdDelete
9 | from bot.helper.ext_utils.bot_utils import is_gdrive_link, sync_to_async
10 |
11 |
12 | async def deletefile(ctx):
13 | message = ctx.event.message
14 | args = message.text.split()
15 | if len(args) > 1:
16 | link = args[1]
17 | elif reply_to := message.replied_to:
18 | link = reply_to.message.split(maxsplit=1)[0].strip()
19 | else:
20 | link = ''
21 | if is_gdrive_link(link):
22 | LOGGER.info(link)
23 | msg = await sync_to_async(gdDelete().deletefile, link, message.user_id)
24 | else:
25 |         msg = 'Send a Gdrive link along with the command or reply to the link with the command'
26 | reply_message = await sendMessage(message, msg)
27 | await auto_delete_message(message, reply_message)
28 |
29 |
30 | bot.add_handler(CommandHandler(BotCommands.DeleteCommand, deletefile, filter=CustomFilters.authorized))
31 |
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/status_utils/split_status.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from bot import LOGGER
3 | from bot.helper.ext_utils.bot_utils import get_readable_file_size, MirrorStatus
4 |
5 |
6 | class SplitStatus:
7 | def __init__(self, name, size, gid, listener):
8 | self.__name = name
9 | self.__gid = gid
10 | self.__size = size
11 | self.__listener = listener
12 | self.message = listener.message
13 |
14 | def gid(self):
15 | return self.__gid
16 |
17 | def progress(self):
18 | return '0'
19 |
20 | def speed(self):
21 | return '0'
22 |
23 | def name(self):
24 | return self.__name
25 |
26 | def size(self):
27 | return get_readable_file_size(self.__size)
28 |
29 | def eta(self):
30 | return '0s'
31 |
32 | def status(self):
33 | return MirrorStatus.STATUS_SPLITTING
34 |
35 | def processed_bytes(self):
36 | return 0
37 |
38 | def download(self):
39 | return self
40 |
41 | async def cancel_download(self):
42 | LOGGER.info(f'Cancelling Split: {self.__name}')
43 | if self.__listener.suproc is not None:
44 | self.__listener.suproc.kill()
45 | else:
46 | self.__listener.suproc = 'cancelled'
47 | await self.__listener.onUploadError('splitting stopped by user!')
48 |
--------------------------------------------------------------------------------
/bot/modules/shell.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from swibots import CommandHandler
3 | from io import BytesIO
4 |
5 | from bot import LOGGER, bot
6 | from bot.helper.switch_helper.message_utils import sendMessage, sendFile
7 | from bot.helper.ext_utils.bot_utils import cmd_exec
8 | from bot.helper.switch_helper.filters import CustomFilters
9 | from bot.helper.switch_helper.bot_commands import BotCommands
10 |
11 |
12 | async def shell(ctx):
13 | message = ctx.event.message
14 | cmd = message.message.split(maxsplit=1)
15 | if len(cmd) == 1:
16 | await sendMessage(message, 'No command to execute was given.')
17 | return
18 | cmd = cmd[1]
19 | stdout, stderr, _ = await cmd_exec(cmd, shell=True)
20 | reply = ''
21 | if len(stdout) != 0:
22 | reply += f"*Stdout*\n{stdout}\n"
23 | LOGGER.info(f"Shell - {cmd} - {stdout}")
24 | if len(stderr) != 0:
25 | reply += f"*Stderr*\n{stderr}"
26 | LOGGER.error(f"Shell - {cmd} - {stderr}")
27 | if len(reply) > 3000:
28 | with BytesIO(str.encode(reply)) as out_file:
29 | await sendFile(message, out_file, "shell_output.txt")
30 | elif len(reply) != 0:
31 | await sendMessage(message, reply)
32 | else:
33 | await sendMessage(message, 'No Reply')
34 |
35 |
36 | bot.add_handler(CommandHandler(BotCommands.ShellCommand,
37 | shell, filter=CustomFilters.owner))
38 |
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/gdrive_utlis/delete.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from logging import getLogger
3 | from googleapiclient.errors import HttpError
4 |
5 | from bot.helper.mirror_utils.gdrive_utlis.helper import GoogleDriveHelper
6 |
7 | LOGGER = getLogger(__name__)
8 |
9 |
10 | class gdDelete(GoogleDriveHelper):
11 |
12 | def __init__(self):
13 | super().__init__()
14 |
15 | def deletefile(self, link, user_id):
16 | try:
17 | file_id = self.getIdFromUrl(link, user_id)
18 | except (KeyError, IndexError):
19 | return "Google Drive ID could not be found in the provided link"
20 | self.service = self.authorize()
21 | msg = ''
22 | try:
23 | self.service.files().delete(fileId=file_id, supportsAllDrives=True).execute()
24 | msg = "Successfully deleted"
25 | LOGGER.info(f"Delete Result: {msg}")
26 | except HttpError as err:
27 | if "File not found" in str(err) or "insufficientFilePermissions" in str(err):
28 | if not self.alt_auth and self.use_sa:
29 | self.alt_auth = True
30 | self.use_sa = False
31 | LOGGER.error('File not found. Trying with token.pickle...')
32 | return self.deletefile(link, user_id)
33 | err = "File not found or insufficientFilePermissions!"
34 | LOGGER.error(f"Delete Result: {err}")
35 | msg = str(err)
36 | return msg
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/status_utils/queue_status.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from bot import LOGGER
3 | from bot.helper.ext_utils.bot_utils import get_readable_file_size, MirrorStatus
4 |
5 |
6 | class QueueStatus:
7 | def __init__(self, name, size, gid, listener, status):
8 | self.__name = name
9 | self.__size = size
10 | self.__gid = gid
11 | self.__listener = listener
12 | self.__status = status
13 | self.message = listener.message
14 |
15 | def gid(self):
16 | return self.__gid
17 |
18 | def name(self):
19 | return self.__name
20 |
21 | def size(self):
22 | return get_readable_file_size(self.__size)
23 |
24 | def status(self):
25 | if self.__status == 'dl':
26 | return MirrorStatus.STATUS_QUEUEDL
27 | return MirrorStatus.STATUS_QUEUEUP
28 |
29 | def processed_bytes(self):
30 | return 0
31 |
32 | def progress(self):
33 | return '0%'
34 |
35 | def speed(self):
36 | return '0B/s'
37 |
38 | def eta(self):
39 | return '-'
40 |
41 | def download(self):
42 | return self
43 |
44 | async def cancel_download(self):
45 | LOGGER.info(f'Cancelling Queue{self.__status}: {self.__name}')
46 | if self.__status == 'dl':
47 |             await self.__listener.onDownloadError('task has been removed from queue/download')
48 |         else:
49 |             await self.__listener.onUploadError('task has been removed from queue/upload')
50 |
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/status_utils/mega_download_status.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from bot.helper.ext_utils.bot_utils import get_readable_file_size, MirrorStatus, get_readable_time
3 |
4 |
5 | class MegaDownloadStatus:
6 |
7 | def __init__(self, name, size, gid, obj, message):
8 | self.__obj = obj
9 | self.__name = name
10 | self.__size = size
11 | self.__gid = gid
12 | self.message = message
13 |
14 | def name(self):
15 | return self.__name
16 |
17 | def progress_raw(self):
18 | try:
19 | return round(self.__obj.downloaded_bytes / self.__size * 100, 2)
20 | except:
21 | return 0.0
22 |
23 | def progress(self):
24 | return f"{self.progress_raw()}%"
25 |
26 | def status(self):
27 | return MirrorStatus.STATUS_DOWNLOADING
28 |
29 | def processed_bytes(self):
30 | return get_readable_file_size(self.__obj.downloaded_bytes)
31 |
32 | def eta(self):
33 | try:
34 | seconds = (self.__size - self.__obj.downloaded_bytes) / \
35 | self.__obj.speed
36 | return get_readable_time(seconds)
37 | except ZeroDivisionError:
38 | return '-'
39 |
40 | def size(self):
41 | return get_readable_file_size(self.__size)
42 |
43 | def speed(self):
44 | return f'{get_readable_file_size(self.__obj.speed)}/s'
45 |
46 | def gid(self):
47 | return self.__gid
48 |
49 | def download(self):
50 | return self.__obj
51 |
--------------------------------------------------------------------------------
/bot/helper/switch_helper/bot_commands.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | class _BotCommands:
4 | def __init__(self):
5 | self.StartCommand = 'start'
6 | self.MirrorCommand = ['mirror', 'm']
7 | self.QbMirrorCommand = ['qbmirror', 'qm']
8 | self.YtdlCommand = ['ytdl', 'y']
9 | self.LeechCommand = ['leech', 'l']
10 | self.QbLeechCommand = ['qbleech', 'ql']
11 | self.YtdlLeechCommand = ['ytdlleech', 'yl']
12 | self.CloneCommand = 'clone'
13 | self.CountCommand = 'count'
14 | self.DeleteCommand = 'del'
15 | self.CancelMirror = 'cancel'
16 | self.CancelAllCommand = 'cancelall'
17 | self.ListCommand = 'list'
18 | self.SearchCommand = 'search'
19 | self.StatusCommand = 'status'
20 | self.UsersCommand = 'users'
21 | self.AuthorizeCommand = 'authorize'
22 | self.UnAuthorizeCommand = 'unauthorize'
23 | self.AddSudoCommand = 'addsudo'
24 | self.RmSudoCommand = 'rmsudo'
25 | self.PingCommand = 'ping'
26 | self.RestartCommand = 'restart'
27 | self.StatsCommand = 'stats'
28 | self.HelpCommand = 'help'
29 | self.LogCommand = 'log'
30 | self.ShellCommand = 'shell'
31 | self.EvalCommand = 'eval'
32 | self.ExecCommand = 'exec'
33 | self.ClearLocalsCommand = 'clearlocals'
34 | self.BotSetCommand = 'bsetting'
35 | self.UserSetCommand = 'usetting'
36 | self.BtSelectCommand = 'btsel'
37 | self.RssCommand = 'rss'
38 |
39 |
40 | BotCommands = _BotCommands()
41 |
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/status_utils/switch_status.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from bot.helper.ext_utils.bot_utils import MirrorStatus, get_readable_file_size, get_readable_time
3 |
4 |
5 | class SwitchStatus:
6 | def __init__(self, obj, size, message, gid, status):
7 | self.__obj = obj
8 | self.__size = size
9 | self.__gid = gid
10 | self.__status = status
11 | self.message = message
12 |
13 | def processed_bytes(self):
14 | return get_readable_file_size(self.__obj.processed_bytes)
15 |
16 | def size(self):
17 | return get_readable_file_size(self.__size)
18 |
19 | def status(self):
20 | if self.__status == 'up':
21 | return MirrorStatus.STATUS_UPLOADING
22 | return MirrorStatus.STATUS_DOWNLOADING
23 |
24 | def name(self):
25 | return self.__obj.name
26 |
27 | def progress(self):
28 | try:
29 | progress_raw = self.__obj.processed_bytes / self.__size * 100
30 | except:
31 | progress_raw = 0
32 | return f'{round(progress_raw, 2)}%'
33 |
34 | def speed(self):
35 | return f'{get_readable_file_size(self.__obj.speed)}/s'
36 |
37 | def eta(self):
38 | try:
39 | seconds = (self.__size - self.__obj.processed_bytes) / \
40 | self.__obj.speed
41 | return get_readable_time(seconds)
42 | except:
43 | return '-'
44 |
45 | def gid(self) -> str:
46 | return self.__gid
47 |
48 | def download(self):
49 | return self.__obj
50 |
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/status_utils/gdrive_status.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from bot.helper.ext_utils.bot_utils import MirrorStatus, get_readable_file_size, get_readable_time
3 |
4 |
5 | class GdriveStatus:
6 | def __init__(self, obj, size, message, gid, status):
7 | self.__obj = obj
8 | self.__size = size
9 | self.__gid = gid
10 | self.__status = status
11 | self.message = message
12 |
13 | def processed_bytes(self):
14 | return get_readable_file_size(self.__obj.processed_bytes)
15 |
16 | def size(self):
17 | return get_readable_file_size(self.__size)
18 |
19 | def status(self):
20 | if self.__status == 'up':
21 | return MirrorStatus.STATUS_UPLOADING
22 | elif self.__status == 'dl':
23 | return MirrorStatus.STATUS_DOWNLOADING
24 | else:
25 | return MirrorStatus.STATUS_CLONING
26 |
27 | def name(self):
28 | return self.__obj.name
29 |
30 | def gid(self) -> str:
31 | return self.__gid
32 |
33 | def progress_raw(self):
34 | try:
35 | return self.__obj.processed_bytes / self.__size * 100
36 | except:
37 | return 0
38 |
39 | def progress(self):
40 | return f'{round(self.progress_raw(), 2)}%'
41 |
42 | def speed(self):
43 | return f'{get_readable_file_size(self.__obj.speed)}/s'
44 |
45 | def eta(self):
46 | try:
47 | seconds = (self.__size - self.__obj.processed_bytes) / \
48 | self.__obj.speed
49 | return get_readable_time(seconds)
50 | except:
51 | return '-'
52 |
53 | def download(self):
54 | return self.__obj
55 |
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/status_utils/yt_dlp_download_status.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from bot.helper.ext_utils.bot_utils import MirrorStatus, get_readable_file_size, get_readable_time, async_to_sync
3 | from bot.helper.ext_utils.fs_utils import get_path_size
4 |
5 |
6 | class YtDlpDownloadStatus:
7 | def __init__(self, obj, listener, gid):
8 | self.__obj = obj
9 | self.__listener = listener
10 | self.__gid = gid
11 | self.message = listener.message
12 |
13 | def gid(self):
14 | return self.__gid
15 |
16 | def processed_bytes(self):
17 | return get_readable_file_size(self.processed_raw())
18 |
19 | def processed_raw(self):
20 | if self.__obj.downloaded_bytes != 0:
21 | return self.__obj.downloaded_bytes
22 | else:
23 | return async_to_sync(get_path_size, self.__listener.dir)
24 |
25 | def size(self):
26 | return get_readable_file_size(self.__obj.size)
27 |
28 | def status(self):
29 | return MirrorStatus.STATUS_DOWNLOADING
30 |
31 | def name(self):
32 | return self.__obj.name
33 |
34 | def progress(self):
35 | return f'{round(self.__obj.progress, 2)}%'
36 |
37 | def speed(self):
38 | return f'{get_readable_file_size(self.__obj.download_speed)}/s'
39 |
40 | def eta(self):
41 | if self.__obj.eta != '-':
42 | return get_readable_time(self.__obj.eta)
43 | try:
44 | seconds = (self.__obj.size - self.processed_raw()) / \
45 | self.__obj.download_speed
46 | return get_readable_time(seconds)
47 | except:
48 | return '-'
49 |
50 | def download(self):
51 | return self.__obj
52 |
--------------------------------------------------------------------------------
/bot/helper/ext_utils/bulk_links.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from aiofiles import open as aiopen
3 | from aiofiles.os import remove
4 |
5 |
6 | async def get_links_from_message(text, bulk_start, bulk_end):
7 | links_list = text.split('\n')
8 | links_list = [item.strip() for item in links_list if len(item) != 0]
9 |
10 | if bulk_start != 0 and bulk_end != 0:
11 | links_list = links_list[bulk_start:bulk_end]
12 | elif bulk_start != 0:
13 | links_list = links_list[bulk_start:]
14 | elif bulk_end != 0:
15 | links_list = links_list[:bulk_end]
16 |
17 | return links_list
18 |
19 |
20 | async def get_links_from_file(message, bulk_start, bulk_end):
21 | links_list = []
22 | text_file_dir = await message.download()
23 |
24 | async with aiopen(text_file_dir, 'r+') as f:
25 | lines = await f.readlines()
26 | links_list.extend(line.strip() for line in lines if len(line) != 0)
27 |
28 | if bulk_start != 0 and bulk_end != 0:
29 | links_list = links_list[bulk_start:bulk_end]
30 | elif bulk_start != 0:
31 | links_list = links_list[bulk_start:]
32 | elif bulk_end != 0:
33 | links_list = links_list[:bulk_end]
34 |
35 | await remove(text_file_dir)
36 |
37 | return links_list
38 |
39 |
40 | async def extract_bulk_links(message, bulk_start, bulk_end):
41 | bulk_start = int(bulk_start)
42 | bulk_end = int(bulk_end)
43 | if (reply_to := message.replied_to) and (file_ := reply_to.document) and (file_.mime_type == 'text/plain'):
44 | return await get_links_from_file(message.replied_to, bulk_start, bulk_end)
45 |     elif reply_to and (text := reply_to.text):
46 | return await get_links_from_message(text, bulk_start, bulk_end)
47 | return []
48 |
--------------------------------------------------------------------------------
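A hypothetical usage sketch for extract_bulk_links (the message object and links are illustrative, not repo code). bulk_start/bulk_end act as plain Python slice bounds, with 0 meaning "no bound on this side":

    # Replied-to message text (one link per line):
    #   https://example.com/a
    #   https://example.com/b
    #   https://example.com/c
    links = await extract_bulk_links(message, bulk_start=1, bulk_end=3)
    # -> ['https://example.com/b', 'https://example.com/c']   (slice [1:3])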
/bot/modules/gd_count.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from swibots import CommandHandler
3 |
4 | from bot import bot
5 | from bot.helper.mirror_utils.gdrive_utlis.count import gdCount
6 | from bot.helper.switch_helper.message_utils import deleteMessage, sendMessage
7 | from bot.helper.switch_helper.filters import CustomFilters
8 | from bot.helper.switch_helper.bot_commands import BotCommands
9 | from bot.helper.ext_utils.bot_utils import is_gdrive_link, sync_to_async, get_readable_file_size
10 |
11 |
12 | async def countNode(ctx):
13 | message = ctx.event.message
14 | args = message.text.split()
15 | tag = f'@{message.user.username}'
16 |
17 | link = args[1] if len(args) > 1 else ''
18 | if len(link) == 0 and (reply_to := message.replied_to):
19 | link = reply_to.message.split(maxsplit=1)[0].strip()
20 |
21 | if is_gdrive_link(link):
22 | msg = await sendMessage(message, f"Counting: {link}")
23 | name, mime_type, size, files, folders = await sync_to_async(gdCount().count, link, message.user_id)
24 | if mime_type is None:
25 | await sendMessage(message, name)
26 | return
27 | await deleteMessage(msg)
28 | msg = f'Name: {name}'
29 | msg += f'\n\nSize: {get_readable_file_size(size)}'
30 | msg += f'\n\nType: {mime_type}'
31 | if mime_type == 'Folder':
32 | msg += f'\nSubFolders: {folders}'
33 | msg += f'\nFiles: {files}'
34 | msg += f'\n\ncc: {tag}'
35 | else:
36 |         msg = 'Send a Gdrive link along with the command or reply to the link with the command'
37 |
38 | await sendMessage(message, msg)
39 |
40 |
41 | bot.add_handler(CommandHandler(BotCommands.CountCommand,
42 | countNode, filter=CustomFilters.authorized))
43 |
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/rclone_utils/serve.py:
--------------------------------------------------------------------------------
1 | from asyncio import create_subprocess_exec
2 | from aiofiles.os import path as aiopath
3 | from aiofiles import open as aiopen
4 | from configparser import ConfigParser
5 |
6 | from bot import config_dict, bot_loop
7 |
8 | RcloneServe = []
9 |
10 |
11 | async def rclone_serve_booter():
12 | if not config_dict['RCLONE_SERVE_URL'] or not await aiopath.exists('rclone.conf'):
13 | if RcloneServe:
14 | try:
15 | RcloneServe[0].kill()
16 | RcloneServe.clear()
17 | except:
18 | pass
19 | return
20 | config = ConfigParser()
21 | async with aiopen('rclone.conf', 'r') as f:
22 | contents = await f.read()
23 | config.read_string(contents)
24 | if not config.has_section('combine'):
25 | upstreams = ' '.join(
26 | f'{remote}={remote}:' for remote in config.sections())
27 | config.add_section('combine')
28 | config.set('combine', 'type', 'combine')
29 | config.set('combine', 'upstreams', upstreams)
30 | with open('rclone.conf', 'w') as f:
31 | config.write(f, space_around_delimiters=False)
32 | if RcloneServe:
33 | try:
34 | RcloneServe[0].kill()
35 | RcloneServe.clear()
36 | except:
37 | pass
38 | cmd = ["rclone", "serve", "http", "--config", "rclone.conf", "--no-modtime",
39 | "combine:", "--addr", f":{config_dict['RCLONE_SERVE_PORT']}",
40 | "--vfs-cache-mode", "full", "--vfs-cache-max-age", "1m0s",
41 | "--buffer-size", "64M"]
42 | if (user := config_dict['RCLONE_SERVE_USER']) and (pswd := config_dict['RCLONE_SERVE_PASS']):
43 | cmd.extend(("--user", user, "--pass", pswd))
44 | rcs = await create_subprocess_exec(*cmd)
45 | RcloneServe.append(rcs)
46 |
47 | bot_loop.run_until_complete(rclone_serve_booter())
48 |
--------------------------------------------------------------------------------
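For reference, if rclone.conf defined two remotes named gdrive and onedrive (hypothetical names), the section rclone_serve_booter() appends would come out as below, letting a single `rclone serve http combine:` expose every remote under one HTTP root:

    [combine]
    type=combine
    upstreams=gdrive=gdrive: onedrive=onedrive: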
/qBittorrent/config/qBittorrent.conf:
--------------------------------------------------------------------------------
1 | [Application]
2 | MemoryWorkingSetLimit=512
3 |
4 | [BitTorrent]
5 | Session\AddExtensionToIncompleteFiles=true
6 | Session\AddTrackersEnabled=false
7 | Session\AnnounceToAllTrackers=true
8 | Session\AnonymousModeEnabled=false
9 | Session\AsyncIOThreadsCount=16
10 | Session\ConnectionSpeed=-1
11 | Session\DHTEnabled=true
12 | Session\DiskCacheSize=-1
13 | Session\GlobalDLSpeedLimit=0
14 | Session\GlobalMaxRatio=-1
15 | Session\GlobalMaxSeedingMinutes=-1
16 | Session\GlobalUPSpeedLimit=0
17 | Session\HashingThreadsCount=1
18 | Session\IgnoreSlowTorrentsForQueueing=true
19 | Session\IncludeOverheadInLimits=false
20 | Session\LSDEnabled=true
21 | Session\MaxActiveCheckingTorrents=3
22 | Session\MaxActiveDownloads=100
23 | Session\MaxActiveTorrents=50
24 | Session\MaxActiveUploads=50
25 | Session\MaxConnections=-1
26 | Session\MaxConnectionsPerTorrent=-1
27 | Session\MaxRatioAction=0
28 | Session\MaxUploads=-1
29 | Session\MaxUploadsPerTorrent=-1
30 | Session\MultiConnectionsPerIp=true
31 | Session\PexEnabled=true
32 | Session\PerformanceWarning=true
33 | Session\Preallocation=true
34 | Session\QueueingSystemEnabled=false
35 | Session\SlowTorrentsDownloadRate=2
36 | Session\SlowTorrentsInactivityTimer=600
37 | Session\SlowTorrentsUploadRate=2
38 | Session\StopTrackerTimeout=5
39 | TrackerEnabled=true
40 |
41 | [LegalNotice]
42 | Accepted=true
43 |
44 | [Meta]
45 | MigrationVersion=4
46 |
47 | [Preferences]
48 | Advanced\DisableRecursiveDownload=false
49 | Advanced\RecheckOnCompletion=false
50 | Advanced\trackerPortForwarding=true
51 | General\PreventFromSuspendWhenDownloading=true
52 | General\PreventFromSuspendWhenSeeding=true
53 | Search\SearchEnabled=true
54 | WebUI\BanDuration=3600
55 | WebUI\CSRFProtection=false
56 | WebUI\ClickjackingProtection=false
57 | WebUI\Enabled=true
58 | WebUI\HTTPS\Enabled=false
59 | WebUI\HostHeaderValidation=false
60 | WebUI\LocalHostAuth=false
61 | WebUI\MaxAuthenticationFailCount=10
62 | WebUI\Port=8090
63 | WebUI\SecureCookie=false
64 | WebUI\SessionTimeout=3600
65 | WebUI\UseUPnP=false
66 |
--------------------------------------------------------------------------------
/bot/helper/switch_helper/button_build.py:
--------------------------------------------------------------------------------
1 | from swibots import InlineKeyboardButton, InlineMarkup
2 |
3 |
4 | class ButtonMaker:
5 | def __init__(self):
6 | self.__button = []
7 | self.__header_button = []
8 | self.__footer_button = []
9 |
10 | def ubutton(self, key, link, position=None):
11 | if not position:
12 | self.__button.append(InlineKeyboardButton(text=key, url=link))
13 | elif position == 'header':
14 | self.__header_button.append(
15 | InlineKeyboardButton(text=key, url=link))
16 | elif position == 'footer':
17 | self.__footer_button.append(
18 | InlineKeyboardButton(text=key, url=link))
19 |
20 | def ibutton(self, key, data, position=None):
21 | if not position:
22 | self.__button.append(InlineKeyboardButton(
23 | text=key, callback_data=data))
24 | elif position == 'header':
25 | self.__header_button.append(
26 | InlineKeyboardButton(text=key, callback_data=data))
27 | elif position == 'footer':
28 | self.__footer_button.append(
29 | InlineKeyboardButton(text=key, callback_data=data))
30 |
31 | def build_menu(self, b_cols=1, h_cols=8, f_cols=8):
32 | menu = [self.__button[i:i+b_cols]
33 | for i in range(0, len(self.__button), b_cols)]
34 | if self.__header_button:
35 | h_cnt = len(self.__header_button)
36 | if h_cnt > h_cols:
37 | header_buttons = [self.__header_button[i:i+h_cols]
38 | for i in range(0, len(self.__header_button), h_cols)]
39 | menu = header_buttons + menu
40 | else:
41 | menu.insert(0, self.__header_button)
42 | if self.__footer_button:
43 | if len(self.__footer_button) > f_cols:
44 |                 for i in range(0, len(self.__footer_button), f_cols):
45 |                     menu.append(self.__footer_button[i:i+f_cols])
46 | else:
47 | menu.append(self.__footer_button)
48 | return InlineMarkup(menu)
49 |
--------------------------------------------------------------------------------
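A hypothetical usage sketch for ButtonMaker (labels, callback data, and the layout are illustrative only):

    buttons = ButtonMaker()
    buttons.ibutton('Refresh', 'status ref', position='header')
    buttons.ibutton('Prev', 'status pre')
    buttons.ibutton('Next', 'status nex')
    buttons.ubutton('Source', 'https://github.com/anasty17/mirror-leech-switch-bot',
                    position='footer')
    markup = buttons.build_menu(b_cols=2)
    # -> InlineMarkup: header row, one 2-column body row, footer row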
/bot/modules/status.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from swibots import CommandHandler, CallbackQueryHandler, regexp
3 | from psutil import cpu_percent, virtual_memory, disk_usage
4 | from time import time
5 |
6 | from bot import status_reply_dict_lock, download_dict, download_dict_lock, botStartTime, DOWNLOAD_DIR, Interval, config_dict, bot
7 | from bot.helper.switch_helper.filters import CustomFilters
8 | from bot.helper.switch_helper.bot_commands import BotCommands
9 | from bot.helper.switch_helper.message_utils import sendMessage, deleteMessage, auto_delete_message, sendStatusMessage, update_all_messages
10 | from bot.helper.ext_utils.bot_utils import get_readable_file_size, get_readable_time, turn_page, setInterval
11 |
12 |
13 | async def mirror_status(ctx):
14 | message = ctx.event.message
15 | async with download_dict_lock:
16 | count = len(download_dict)
17 | if count == 0:
18 | currentTime = get_readable_time(time() - botStartTime)
19 | free = get_readable_file_size(disk_usage(DOWNLOAD_DIR).free)
20 |         msg = 'No Active Downloads!\n___________________________'
21 | msg += f"\nCPU: {cpu_percent()}% | FREE: {free}" \
22 | f"\nRAM: {virtual_memory().percent}% | UPTIME: {currentTime}"
23 | reply_message = await sendMessage(message, msg)
24 | await auto_delete_message(message, reply_message)
25 | else:
26 | await sendStatusMessage(message)
27 | await deleteMessage(message)
28 | async with status_reply_dict_lock:
29 | if Interval:
30 | Interval[0].cancel()
31 | Interval.clear()
32 | Interval.append(setInterval(
33 | config_dict['STATUS_UPDATE_INTERVAL'], update_all_messages))
34 |
35 |
36 | async def status_pages(ctx):
37 |
38 | data = ctx.event.callback_data.split()
39 | if data[1] == "ref":
40 | await update_all_messages(True)
41 | else:
42 | await turn_page(data)
43 |
44 |
45 | bot.add_handler(CommandHandler(BotCommands.StatusCommand, mirror_status, filter=CustomFilters.authorized))
46 | bot.add_handler(CallbackQueryHandler(status_pages, filter=regexp("^status")))
47 |
--------------------------------------------------------------------------------
/driveid.py:
--------------------------------------------------------------------------------
1 | import os
2 | import re
 3 | print("\n\n"
 4 |       " Bot can search files recursively, but you have to add the list of drives you want to search.\n"
 5 |       " Use the following format: (you can use 'root' as the ID if you want to use the main drive)\n"
 6 |       " teamdrive NAME      --> any name you like\n"
 7 |       " teamdrive ID        --> ID of the teamdrive you want to search ('root' for the main drive)\n"
 8 |       " teamdrive INDEX URL --> index url for this drive (optional);\n"
 9 |       "                         go to the respective drive and copy the url from the address bar\n")
10 | msg = ''
11 | if os.path.exists('list_drives.txt'):
12 | with open('list_drives.txt', 'r+') as f:
13 | lines = f.read()
14 | if not re.match(r'^\s*$', lines):
15 | print(lines)
16 |             print("\n\n"
17 |                   " DO YOU WISH TO KEEP THE ABOVE DETAILS THAT YOU PREVIOUSLY ADDED? ENTER (y/n)\n"
18 |                   " IF NOTHING SHOWS, ENTER n")
19 | while 1:
20 | choice = input()
21 | if choice in ['y', 'Y']:
22 | msg = f'{lines}'
23 | break
24 | elif choice in ['n', 'N']:
25 | break
26 | else:
27 |                     print(
28 |                         "\n\n PLEASE ANSWER y OR n: DO YOU WISH TO KEEP THE ABOVE DETAILS?")
29 | num = int(input(" How many drives/folders would you like to add : "))
30 | for count in range(1, num + 1):
31 | print(f"\n > DRIVE - {count}\n")
32 | name = input(" Enter Drive NAME (anything) : ")
33 | id = input(" Enter Drive ID : ")
34 | index = input(" Enter Drive INDEX URL (optional) : ")
35 | if not name or not id:
36 |         print("\n\n ERROR: The name and ID must not be left empty.")
37 | exit(1)
38 | name = name.replace(" ", "_")
39 | if index:
40 | if index[-1] == "/":
41 | index = index[:-1]
42 | else:
43 | index = ''
44 | msg += f"{name} {id} {index}\n"
45 | with open('list_drives.txt', 'w') as file:
46 | file.truncate(0)
47 | file.write(msg)
48 | print("\n\n Done!")
49 |
--------------------------------------------------------------------------------
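The script writes list_drives.txt with one drive per line in the form `NAME ID INDEX` (index optional); for example, with hypothetical values:

    MyDrive 0AbCdEfGhIjKlUk9PVA https://index.example.workers.dev/0:
    Main root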
/bot/helper/mirror_utils/status_utils/zip_status.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from time import time
3 |
4 | from bot import LOGGER
5 | from bot.helper.ext_utils.bot_utils import get_readable_file_size, MirrorStatus, get_readable_time, async_to_sync
6 | from bot.helper.ext_utils.fs_utils import get_path_size
7 |
8 |
9 | class ZipStatus:
10 | def __init__(self, name, size, gid, listener):
11 | self.__name = name
12 | self.__size = size
13 | self.__gid = gid
14 | self.__listener = listener
15 | self.__start_time = time()
16 | self.message = listener.message
17 |
18 | def gid(self):
19 | return self.__gid
20 |
21 | def speed_raw(self):
22 | return self.processed_raw() / (time() - self.__start_time)
23 |
24 | def progress_raw(self):
25 | try:
26 | return self.processed_raw() / self.__size * 100
27 | except:
28 | return 0
29 |
30 | def progress(self):
31 | return f'{round(self.progress_raw(), 2)}%'
32 |
33 | def speed(self):
34 | return f'{get_readable_file_size(self.speed_raw())}/s'
35 |
36 | def name(self):
37 | return self.__name
38 |
39 | def size(self):
40 | return get_readable_file_size(self.__size)
41 |
42 | def eta(self):
43 | try:
44 | seconds = (self.__size - self.processed_raw()) / self.speed_raw()
45 | return get_readable_time(seconds)
46 | except:
47 | return '-'
48 |
49 | def status(self):
50 | return MirrorStatus.STATUS_ARCHIVING
51 |
52 | def processed_raw(self):
53 | if self.__listener.newDir:
54 | return async_to_sync(get_path_size, self.__listener.newDir)
55 | else:
56 | return async_to_sync(get_path_size, self.__listener.dir) - self.__size
57 |
58 | def processed_bytes(self):
59 | return get_readable_file_size(self.processed_raw())
60 |
61 | def download(self):
62 | return self
63 |
64 | async def cancel_download(self):
65 | LOGGER.info(f'Cancelling Archive: {self.__name}')
66 | if self.__listener.suproc is not None:
67 | self.__listener.suproc.kill()
68 | else:
69 | self.__listener.suproc = 'cancelled'
70 | await self.__listener.onUploadError('archiving stopped by user!')
71 |
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/status_utils/extract_status.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from time import time
3 |
4 | from bot import LOGGER
5 | from bot.helper.ext_utils.bot_utils import get_readable_file_size, MirrorStatus, get_readable_time, async_to_sync
6 | from bot.helper.ext_utils.fs_utils import get_path_size
7 |
8 |
9 | class ExtractStatus:
10 | def __init__(self, name, size, gid, listener):
11 | self.__name = name
12 | self.__size = size
13 | self.__gid = gid
14 | self.__listener = listener
15 | self.__uid = listener.uid
16 | self.__start_time = time()
17 | self.message = listener.message
18 |
19 | def gid(self):
20 | return self.__gid
21 |
22 | def speed_raw(self):
23 | return self.processed_raw() / (time() - self.__start_time)
24 |
25 | def progress_raw(self):
26 | try:
27 | return self.processed_raw() / self.__size * 100
28 | except:
29 | return 0
30 |
31 | def progress(self):
32 | return f'{round(self.progress_raw(), 2)}%'
33 |
34 | def speed(self):
35 | return f'{get_readable_file_size(self.speed_raw())}/s'
36 |
37 | def name(self):
38 | return self.__name
39 |
40 | def size(self):
41 | return get_readable_file_size(self.__size)
42 |
43 | def eta(self):
44 | try:
45 | seconds = (self.__size - self.processed_raw()) / self.speed_raw()
46 | return get_readable_time(seconds)
47 | except:
48 | return '-'
49 |
50 | def status(self):
51 | return MirrorStatus.STATUS_EXTRACTING
52 |
53 | def processed_bytes(self):
54 | return get_readable_file_size(self.processed_raw())
55 |
56 | def processed_raw(self):
57 | if self.__listener.newDir:
58 | return async_to_sync(get_path_size, self.__listener.newDir)
59 | else:
60 | return async_to_sync(get_path_size, self.__listener.dir) - self.__size
61 |
62 | def download(self):
63 | return self
64 |
65 | async def cancel_download(self):
66 | LOGGER.info(f'Cancelling Extract: {self.__name}')
67 | if self.__listener.suproc is not None:
68 | self.__listener.suproc.kill()
69 | else:
70 | self.__listener.suproc = 'cancelled'
71 | await self.__listener.onUploadError('extracting stopped by user!')
72 |
--------------------------------------------------------------------------------
/bot/helper/mirror_utils/download_utils/gd_download.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from random import SystemRandom
3 | from string import ascii_letters, digits
4 |
5 | from bot import download_dict, download_dict_lock, LOGGER, non_queued_dl, queue_dict_lock
6 | from bot.helper.mirror_utils.gdrive_utlis.download import gdDownload
7 | from bot.helper.mirror_utils.gdrive_utlis.count import gdCount
8 | from bot.helper.mirror_utils.status_utils.gdrive_status import GdriveStatus
9 | from bot.helper.mirror_utils.status_utils.queue_status import QueueStatus
10 | from bot.helper.switch_helper.message_utils import sendMessage, sendStatusMessage
11 | from bot.helper.ext_utils.bot_utils import sync_to_async
12 | from bot.helper.ext_utils.task_manager import is_queued, stop_duplicate_check
13 |
14 |
15 | async def add_gd_download(link, path, listener, newname):
16 | drive = gdCount()
17 | name, mime_type, size, _, _ = await sync_to_async(drive.count, link, listener.user_id)
18 | if mime_type is None:
19 | await sendMessage(listener.message, name)
20 | return
21 |
22 | name = newname or name
23 | gid = ''.join(SystemRandom().choices(ascii_letters + digits, k=12))
24 |
25 | msg, button = await stop_duplicate_check(name, listener)
26 | if msg:
27 | await sendMessage(listener.message, msg, button)
28 | return
29 |
30 | added_to_queue, event = await is_queued(listener.uid)
31 | if added_to_queue:
32 | LOGGER.info(f"Added to Queue/Download: {name}")
33 | async with download_dict_lock:
34 | download_dict[listener.uid] = QueueStatus(
35 | name, size, gid, listener, 'dl')
36 | await sendStatusMessage(listener.message)
37 | await event.wait()
38 | async with download_dict_lock:
39 | if listener.uid not in download_dict:
40 | return
41 | from_queue = True
42 | else:
43 | from_queue = False
44 |
45 | drive = gdDownload(name, path, listener)
46 | async with download_dict_lock:
47 | download_dict[listener.uid] = GdriveStatus(
48 | drive, size, listener.message, gid, 'dl')
49 |
50 | async with queue_dict_lock:
51 | non_queued_dl.add(listener.uid)
52 |
53 | if from_queue:
54 | LOGGER.info(f'Start Queued Download from GDrive: {name}')
55 | else:
56 | LOGGER.info(f"Download from GDrive: {name}")
57 | await sendStatusMessage(listener.message)
58 |
59 | await sync_to_async(drive.download, link)
60 |
--------------------------------------------------------------------------------
/update.py:
--------------------------------------------------------------------------------
1 | from logging import FileHandler, StreamHandler, INFO, basicConfig, error as log_error, info as log_info
2 | from os import path as ospath, environ, remove
3 | from subprocess import run as srun
4 | from requests import get as rget
5 | from dotenv import load_dotenv, dotenv_values
6 | from pymongo import MongoClient
7 |
8 | if ospath.exists('log.txt'):
9 | with open('log.txt', 'r+') as f:
10 | f.truncate(0)
11 |
12 | if ospath.exists('rlog.txt'):
13 | remove('rlog.txt')
14 |
15 | basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
16 | handlers=[FileHandler('log.txt'), StreamHandler()],
17 | level=INFO)
18 |
19 | load_dotenv('config.env', override=True)
20 |
21 | try:
22 | if bool(environ.get('_____REMOVE_THIS_LINE_____')):
23 |         log_error('The README.md file is there to be read! Exiting now!')
24 | exit()
25 | except:
26 | pass
27 |
28 | BOT_TOKEN = environ.get('BOT_TOKEN', '')
29 | if len(BOT_TOKEN) == 0:
30 | log_error("BOT_TOKEN variable is missing! Exiting now")
31 | exit(1)
32 |
33 | bot_id = BOT_TOKEN.split('.', 1)[0]
34 |
35 | DATABASE_URL = environ.get('DATABASE_URL', '')
36 | if len(DATABASE_URL) == 0:
37 | DATABASE_URL = None
38 |
39 | if DATABASE_URL is not None:
40 | conn = MongoClient(DATABASE_URL)
41 | db = conn.mlsb
42 | old_config = db.settings.deployConfig.find_one({'_id': bot_id})
43 | config_dict = db.settings.config.find_one({'_id': bot_id})
44 | if old_config is not None:
45 | del old_config['_id']
46 | if (old_config is not None and old_config == dict(dotenv_values('config.env')) or old_config is None) \
47 | and config_dict is not None:
48 | environ['UPSTREAM_REPO'] = config_dict['UPSTREAM_REPO']
49 | environ['UPSTREAM_BRANCH'] = config_dict['UPSTREAM_BRANCH']
50 | conn.close()
51 |
52 | UPSTREAM_REPO = environ.get('UPSTREAM_REPO', '')
53 | if len(UPSTREAM_REPO) == 0:
54 | UPSTREAM_REPO = None
55 |
56 | UPSTREAM_BRANCH = environ.get('UPSTREAM_BRANCH', '')
57 | if len(UPSTREAM_BRANCH) == 0:
58 | UPSTREAM_BRANCH = 'master'
59 |
60 | if UPSTREAM_REPO is not None:
61 | if ospath.exists('.git'):
62 | srun(["rm", "-rf", ".git"])
63 |
64 | update = srun([f"git init -q \
65 | && git config --global user.email e.anastayyar@gmail.com \
66 | && git config --global user.name mltb \
67 | && git add . \
68 | && git commit -sm update -q \
69 | && git remote add origin {UPSTREAM_REPO} \
70 | && git fetch origin -q \
71 | && git reset --hard origin/{UPSTREAM_BRANCH} -q"], shell=True)
72 |
73 | if update.returncode == 0:
74 | log_info('Successfully updated with latest commit from UPSTREAM_REPO')
75 | else:
76 | log_error(
77 |             'Something went wrong while updating; check whether UPSTREAM_REPO is valid!')
78 |
--------------------------------------------------------------------------------
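For context, these are the two variables update.py consumes from config.env, in the `KEY = "value"` style that config_sample.env uses (the URL shown is this project's own repository; treat the values as an example):

    UPSTREAM_REPO = "https://github.com/anasty17/mirror-leech-switch-bot"
    UPSTREAM_BRANCH = "master"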
/bot/helper/mirror_utils/gdrive_utlis/count.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from logging import getLogger
3 | from tenacity import RetryError
4 |
5 | from bot.helper.mirror_utils.gdrive_utlis.helper import GoogleDriveHelper
6 |
7 | LOGGER = getLogger(__name__)
8 |
9 |
10 | class gdCount(GoogleDriveHelper):
11 |
12 | def __init__(self):
13 | super().__init__()
14 |
15 | def count(self, link, user_id):
16 | try:
17 | file_id = self.getIdFromUrl(link, user_id)
18 | except (KeyError, IndexError):
19 | return "Google Drive ID could not be found in the provided link", None, None, None, None
20 | self.service = self.authorize()
21 | LOGGER.info(f"File ID: {file_id}")
22 | try:
23 | return self.__proceed_count(file_id)
24 | except Exception as err:
25 | if isinstance(err, RetryError):
26 | LOGGER.info(
27 | f"Total Attempts: {err.last_attempt.attempt_number}")
28 | err = err.last_attempt.exception()
29 | err = str(err).replace('>', '').replace('<', '')
30 | if "File not found" in err:
31 | if not self.alt_auth and self.use_sa:
32 | self.alt_auth = True
33 | self.use_sa = False
34 | LOGGER.error('File not found. Trying with token.pickle...')
35 | return self.count(link, user_id)
36 | msg = "File not found."
37 | else:
38 | msg = f"Error.\n{err}"
39 | return msg, None, None, None, None
40 |
41 | def __proceed_count(self, file_id):
42 | meta = self.getFileMetadata(file_id)
43 | name = meta['name']
44 | LOGGER.info(f"Counting: {name}")
45 | mime_type = meta.get('mimeType')
46 | if mime_type == self.G_DRIVE_DIR_MIME_TYPE:
47 | self.__gDrive_directory(meta)
48 | mime_type = 'Folder'
49 | else:
50 | if mime_type is None:
51 | mime_type = 'File'
52 | self.total_files += 1
53 | self.__gDrive_file(meta)
54 | return name, mime_type, self.proc_bytes, self.total_files, self.total_folders
55 |
56 | def __gDrive_file(self, filee):
57 | size = int(filee.get('size', 0))
58 | self.proc_bytes += size
59 |
60 | def __gDrive_directory(self, drive_folder):
61 | files = self.getFilesByFolderId(drive_folder['id'])
62 | if len(files) == 0:
63 | return
64 | for filee in files:
65 | shortcut_details = filee.get('shortcutDetails')
66 | if shortcut_details is not None:
67 | mime_type = shortcut_details['targetMimeType']
68 | file_id = shortcut_details['targetId']
69 | filee = self.getFileMetadata(file_id)
70 | else:
71 | mime_type = filee.get('mimeType')
72 | if mime_type == self.G_DRIVE_DIR_MIME_TYPE:
73 | self.total_folders += 1
74 | self.__gDrive_directory(filee)
75 | else:
76 | self.total_files += 1
77 | self.__gDrive_file(filee)
78 |
--------------------------------------------------------------------------------
/bot/helper/ext_utils/telegraph_helper.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from string import ascii_letters
3 | from random import SystemRandom
4 | from asyncio import sleep
5 | from telegraph.aio import Telegraph
6 | from telegraph.exceptions import RetryAfterError
7 |
8 | from bot import LOGGER, bot_loop
9 |
10 |
11 | class TelegraphHelper:
12 | def __init__(self, author_name=None, author_url=None):
13 | self.telegraph = Telegraph(domain='graph.org')
14 | self.short_name = ''.join(SystemRandom().choices(ascii_letters, k=8))
15 | self.access_token = None
16 | self.author_name = author_name
17 | self.author_url = author_url
18 |
19 | async def create_account(self):
20 | await self.telegraph.create_account(
21 | short_name=self.short_name,
22 | author_name=self.author_name,
23 | author_url=self.author_url
24 | )
25 | self.access_token = self.telegraph.get_access_token()
26 | LOGGER.info("Creating Telegraph Account")
27 |
28 | async def create_page(self, title, content):
29 | try:
30 | return await self.telegraph.create_page(
31 | title=title,
32 | author_name=self.author_name,
33 | author_url=self.author_url,
34 | html_content=content
35 | )
36 | except RetryAfterError as st:
37 | LOGGER.warning(
38 | f'Telegraph Flood control exceeded. I will sleep for {st.retry_after} seconds.')
39 | await sleep(st.retry_after)
40 | return await self.create_page(title, content)
41 |
42 | async def edit_page(self, path, title, content):
43 | try:
44 | return await self.telegraph.edit_page(
45 | path=path,
46 | title=title,
47 | author_name=self.author_name,
48 | author_url=self.author_url,
49 | html_content=content
50 | )
51 | except RetryAfterError as st:
52 | LOGGER.warning(
53 | f'Telegraph Flood control exceeded. I will sleep for {st.retry_after} seconds.')
54 | await sleep(st.retry_after)
55 | return await self.edit_page(path, title, content)
56 |
57 | async def edit_telegraph(self, path, telegraph_content):
58 | nxt_page = 1
59 | prev_page = 0
60 | num_of_path = len(path)
61 | for content in telegraph_content:
62 |             if nxt_page == 1:
63 |                 content += f'<b><a href="https://graph.org/{path[nxt_page]}">Next</a></b>'
64 |                 nxt_page += 1
65 |             else:
66 |                 if prev_page <= num_of_path:
67 |                     content += f'<b><a href="https://graph.org/{path[prev_page]}">Prev</a></b>'
68 |                     prev_page += 1
69 |                 if nxt_page < num_of_path:
70 |                     content += f'<b> | <a href="https://graph.org/{path[nxt_page]}">Next</a></b>'
71 |                     nxt_page += 1
72 | await self.edit_page(
73 | path=path[prev_page],
74 | title='Mirror-leech-bot Torrent Search',
75 | content=content
76 | )
77 | return
78 |
79 |
80 | telegraph = TelegraphHelper('Mirror-Leech-Switch-Bot',
81 | 'https://github.com/anasty17/mirror-leech-switch-bot')
82 |
83 | bot_loop.run_until_complete(telegraph.create_account())
84 |
--------------------------------------------------------------------------------
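A note on the retry pattern above: create_page and edit_page handle flood control by calling themselves, which can recurse arbitrarily deep under a sustained flood wait. A loop-based sketch of the same idea, with RateLimited standing in for telegraph's RetryAfterError (any exception exposing retry_after works):

from asyncio import sleep
from functools import wraps

class RateLimited(Exception):  # stand-in for telegraph.exceptions.RetryAfterError
    def __init__(self, retry_after):
        super().__init__(f'retry after {retry_after}s')
        self.retry_after = retry_after

def retry_on_flood(func):
    @wraps(func)
    async def wrapper(*args, **kwargs):
        while True:  # loop instead of recursing, so long floods can't blow the stack
            try:
                return await func(*args, **kwargs)
            except RateLimited as e:
                await sleep(e.retry_after)
    return wrapper

A decorator like this keeps the flood-wait handling in one place instead of duplicating it in every page method.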
/bot/helper/mirror_utils/download_utils/rclone_download.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from asyncio import gather
3 | from json import loads
4 | from random import SystemRandom
5 | from string import ascii_letters, digits
6 |
7 | from bot import download_dict, download_dict_lock, queue_dict_lock, non_queued_dl, LOGGER
8 | from bot.helper.ext_utils.bot_utils import cmd_exec
9 | from bot.helper.switch_helper.message_utils import sendMessage, sendStatusMessage
10 | from bot.helper.ext_utils.task_manager import is_queued, stop_duplicate_check
11 | from bot.helper.mirror_utils.status_utils.rclone_status import RcloneStatus
12 | from bot.helper.mirror_utils.status_utils.queue_status import QueueStatus
13 | from bot.helper.mirror_utils.rclone_utils.transfer import RcloneTransferHelper
14 |
15 |
16 | async def add_rclone_download(rc_path, config_path, path, name, listener):
17 | remote, rc_path = rc_path.split(':', 1)
18 | rc_path = rc_path.strip('/')
19 |
20 | cmd1 = ['rclone', 'lsjson', '--fast-list', '--stat', '--no-mimetype',
21 | '--no-modtime', '--config', config_path, f'{remote}:{rc_path}']
22 | cmd2 = ['rclone', 'size', '--fast-list', '--json',
23 | '--config', config_path, f'{remote}:{rc_path}']
24 | res1, res2 = await gather(cmd_exec(cmd1), cmd_exec(cmd2))
25 |     if res1[2] != 0 or res2[2] != 0:  # fail if either rclone call errored
26 | if res1[2] != -9:
27 | err = res1[1] or res2[1]
28 | msg = f'Error: While getting rclone stat/size. Path: {remote}:{rc_path}. Stderr: {err[:4000]}'
29 | await sendMessage(listener.message, msg)
30 | return
31 | try:
32 | rstat = loads(res1[0])
33 | rsize = loads(res2[0])
34 | except Exception as err:
35 | await sendMessage(listener.message, f'RcloneDownload JsonLoad: {err}')
36 | return
37 | if rstat['IsDir']:
38 | if not name:
39 | name = rc_path.rsplit('/', 1)[-1] if rc_path else remote
40 | path += name
41 | else:
42 | name = rc_path.rsplit('/', 1)[-1]
43 | size = rsize['bytes']
44 | gid = ''.join(SystemRandom().choices(ascii_letters + digits, k=12))
45 |
46 | msg, button = await stop_duplicate_check(name, listener)
47 | if msg:
48 | await sendMessage(listener.message, msg, button)
49 | return
50 |
51 | added_to_queue, event = await is_queued(listener.uid)
52 | if added_to_queue:
53 | LOGGER.info(f"Added to Queue/Download: {name}")
54 | async with download_dict_lock:
55 | download_dict[listener.uid] = QueueStatus(
56 | name, size, gid, listener, 'dl')
57 | await sendStatusMessage(listener.message)
58 | await event.wait()
59 | async with download_dict_lock:
60 | if listener.uid not in download_dict:
61 | return
62 | from_queue = True
63 | else:
64 | from_queue = False
65 |
66 | RCTransfer = RcloneTransferHelper(listener, name)
67 | async with download_dict_lock:
68 | download_dict[listener.uid] = RcloneStatus(
69 | RCTransfer, listener.message, gid, 'dl')
70 | async with queue_dict_lock:
71 | non_queued_dl.add(listener.uid)
72 |
73 | if from_queue:
74 | LOGGER.info(f'Start Queued Download with rclone: {rc_path}')
75 | else:
76 | await sendStatusMessage(listener.message)
77 | LOGGER.info(f"Download with rclone: {rc_path}")
78 |
79 | await RCTransfer.download(remote, rc_path, config_path, path)
80 |
--------------------------------------------------------------------------------
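add_rclone_download above shells out to rclone lsjson and rclone size in parallel and parses their JSON output. A minimal, self-contained sketch of that probe (the remote path and config path are illustrative):

from asyncio import create_subprocess_exec, run
from asyncio.subprocess import PIPE
from json import loads

async def rclone_stat(remote_path, config_path='rclone.conf'):
    # `rclone lsjson --stat` prints a single JSON object describing the path
    proc = await create_subprocess_exec(
        'rclone', 'lsjson', '--stat', '--config', config_path, remote_path,
        stdout=PIPE, stderr=PIPE)
    out, err = await proc.communicate()
    if proc.returncode != 0:
        raise RuntimeError(err.decode(errors='replace'))
    return loads(out)  # keys include Name, Size and IsDir

# print(run(rclone_stat('gdrive:backups')))  # hypothetical remote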
/add_to_team_drive.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 | from google.oauth2.service_account import Credentials
3 | import googleapiclient.discovery
4 | import json
5 | import progress.bar
6 | import glob
7 | import sys
8 | import argparse
9 | import time
10 | from google_auth_oauthlib.flow import InstalledAppFlow
11 | from google.auth.transport.requests import Request
12 | import os
13 | import pickle
14 |
15 | stt = time.time()
16 |
17 | parse = argparse.ArgumentParser(
18 | description='A tool to add service accounts to a shared drive from a folder containing credential files.')
19 | parse.add_argument('--path', '-p', default='accounts',
20 | help='Specify an alternative path to the service accounts folder.')
21 | parse.add_argument('--credentials', '-c', default='./credentials.json',
22 | help='Specify the relative path for the credentials file.')
23 | parse.add_argument('--yes', '-y', default=False,
24 | action='store_true', help='Skips the sanity prompt.')
25 | parsereq = parse.add_argument_group('required arguments')
26 | parsereq.add_argument('--drive-id', '-d',
27 | help='The ID of the Shared Drive.', required=True)
28 |
29 | args = parse.parse_args()
30 | acc_dir = args.path
31 | did = args.drive_id
32 | credentials = glob.glob(args.credentials)
33 |
34 | try:
35 | open(credentials[0], 'r')
36 | print('>> Found credentials.')
37 | except IndexError:
38 | print('>> No credentials found.')
39 |     sys.exit(1)
40 |
41 | if not args.yes:
42 | # input('Make sure the following client id is added to the shared drive as Manager:\n' + json.loads((open(
43 | # credentials[0],'r').read()))['installed']['client_id'])
44 | input('>> Make sure the **Google account** that has generated credentials.json\n is added into your Team Drive '
45 |           '(shared drive) as Manager\n>> (Press Enter to continue)')
46 |
47 | creds = None
48 | if os.path.exists('token_sa.pickle'):
49 | with open('token_sa.pickle', 'rb') as token:
50 | creds = pickle.load(token)
51 | # If there are no (valid) credentials available, let the user log in.
52 | if not creds or not creds.valid:
53 | if creds and creds.expired and creds.refresh_token:
54 | creds.refresh(Request())
55 | else:
56 | flow = InstalledAppFlow.from_client_secrets_file(credentials[0], scopes=[
57 | 'https://www.googleapis.com/auth/admin.directory.group',
58 | 'https://www.googleapis.com/auth/admin.directory.group.member'
59 | ])
60 | # creds = flow.run_local_server(port=0)
61 | creds = flow.run_console()
62 | # Save the credentials for the next run
63 | with open('token_sa.pickle', 'wb') as token:
64 | pickle.dump(creds, token)
65 |
66 | drive = googleapiclient.discovery.build("drive", "v3", credentials=creds)
67 | batch = drive.new_batch_http_request()
68 |
69 | aa = glob.glob('%s/*.json' % acc_dir)
70 | pbar = progress.bar.Bar("Readying accounts", max=len(aa))
71 | for i in aa:
72 | ce = json.loads(open(i, 'r').read())['client_email']
73 | batch.add(drive.permissions().create(fileId=did, supportsAllDrives=True, body={
74 | "role": "organizer",
75 | "type": "user",
76 | "emailAddress": ce
77 | }))
78 | pbar.next()
79 | pbar.finish()
80 | print('Adding...')
81 | batch.execute()
82 |
83 | print('Complete.')
84 | hours, rem = divmod((time.time() - stt), 3600)
85 | minutes, sec = divmod(rem, 60)
86 | print("Elapsed Time:\n{:0>2}:{:0>2}:{:05.2f}".format(
87 | int(hours), int(minutes), sec))
88 |
--------------------------------------------------------------------------------
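batch.execute() above reports nothing per sub-request, so a revoked or malformed service-account file fails silently. google-api-python-client's new_batch_http_request accepts a callback that receives each sub-request's response or exception; a sketch, assuming the script's drive service and permission loop:

def report(request_id, response, exception):
    # called once per batched permissions().create(); exception is None on success
    if exception is not None:
        print(f'>> permission request {request_id} failed: {exception}')

# batch = drive.new_batch_http_request(callback=report)
# ...batch.add(...) for each account as above, then batch.execute()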
/bot/helper/mirror_utils/download_utils/aria2_download.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from bot import aria2, download_dict_lock, download_dict, LOGGER, config_dict, aria2_options, aria2c_global, non_queued_dl, queue_dict_lock
3 | from bot.helper.ext_utils.bot_utils import bt_selection_buttons, sync_to_async
4 | from bot.helper.mirror_utils.status_utils.aria2_status import Aria2Status
5 | from bot.helper.switch_helper.message_utils import sendStatusMessage, sendMessage
6 | from bot.helper.ext_utils.task_manager import is_queued
7 |
8 |
9 | async def add_aria2c_download(link, path, listener, filename, auth, ratio, seed_time):
10 | a2c_opt = {**aria2_options}
11 |     for k in aria2c_global: a2c_opt.pop(k, None)  # strip aria2 global-only options
12 | a2c_opt['dir'] = path
13 | if filename:
14 | a2c_opt['out'] = filename
15 | if auth:
16 | a2c_opt['header'] = f"authorization: {auth}"
17 | if ratio:
18 | a2c_opt['seed-ratio'] = ratio
19 | if seed_time:
20 | a2c_opt['seed-time'] = seed_time
21 | if TORRENT_TIMEOUT := config_dict['TORRENT_TIMEOUT']:
22 | a2c_opt['bt-stop-timeout'] = f'{TORRENT_TIMEOUT}'
23 | added_to_queue, event = await is_queued(listener.uid)
24 | if added_to_queue:
25 | if link.startswith('magnet:'):
26 | a2c_opt['pause-metadata'] = 'true'
27 | else:
28 | a2c_opt['pause'] = 'true'
29 | try:
30 | download = (await sync_to_async(aria2.add, link, a2c_opt))[0]
31 | except Exception as e:
32 | LOGGER.info(f"Aria2c Download Error: {e}")
33 | await sendMessage(listener.message, f'{e}')
34 | return
35 | if download.error_message:
36 | error = str(download.error_message).replace('<', ' ').replace('>', ' ')
37 | LOGGER.info(f"Aria2c Download Error: {error}")
38 | await sendMessage(listener.message, error)
39 | return
40 |
41 | gid = download.gid
42 | name = download.name
43 | async with download_dict_lock:
44 | download_dict[listener.uid] = Aria2Status(
45 | gid, listener, queued=added_to_queue)
46 | if added_to_queue:
47 | LOGGER.info(f"Added to Queue/Download: {name}. Gid: {gid}")
48 | if not listener.select or not download.is_torrent:
49 | await sendStatusMessage(listener.message)
50 | else:
51 | async with queue_dict_lock:
52 | non_queued_dl.add(listener.uid)
53 | LOGGER.info(f"Aria2Download started: {name}. Gid: {gid}")
54 |
55 | if not added_to_queue and (not listener.select or not config_dict['BASE_URL']):
56 | await sendStatusMessage(listener.message)
57 | elif listener.select and download.is_torrent and not download.is_metadata:
58 | if not added_to_queue:
59 | await sync_to_async(aria2.client.force_pause, gid)
60 | SBUTTONS = bt_selection_buttons(gid)
61 | msg = "Your download paused. Choose files then press Done Selecting button to start downloading."
62 | await sendMessage(listener.message, msg, SBUTTONS)
63 |
64 | if added_to_queue:
65 | await event.wait()
66 |
67 | async with download_dict_lock:
68 | if listener.uid not in download_dict:
69 | return
70 | download = download_dict[listener.uid]
71 | download.queued = False
72 | new_gid = download.gid()
73 |
74 | await sync_to_async(aria2.client.unpause, new_gid)
75 |     LOGGER.info(f'Start Queued Download from Aria2c: {name}. Gid: {new_gid}')
76 |
77 | async with queue_dict_lock:
78 | non_queued_dl.add(listener.uid)
79 |
--------------------------------------------------------------------------------
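The aria2.add(...) call above comes from aria2p. A minimal standalone sketch of the same call against a local aria2c daemon (host, port, secret, URL, and options are illustrative, and aria2c must be running with --enable-rpc):

import aria2p

# connect to a local aria2c started with --enable-rpc (defaults shown)
aria2 = aria2p.API(aria2p.Client(host='http://localhost', port=6800, secret=''))
opts = {'dir': '/downloads', 'bt-stop-timeout': '600'}
download = aria2.add('https://example.com/file.iso', opts)[0]  # returns a list
print(download.gid, download.name)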
/config_sample.env:
--------------------------------------------------------------------------------
1 | # Remove this line before deploying
2 | _____REMOVE_THIS_LINE_____=True
3 |
4 | # REQUIRED CONFIG
5 | BOT_TOKEN = "" # Require restart after changing it while bot running
6 | OWNER_ID = "" # Require restart after changing it while bot running
7 |
8 | # OPTIONAL CONFIG
9 | DATABASE_URL = "" # Require restart after changing it while bot running
10 | DOWNLOAD_DIR = "/usr/src/app/downloads/" # Require restart after changing it while bot running
11 | AUTHORIZED_CHATS = "" # Require restart after changing it while bot running
12 | SUDO_USERS = "" # Require restart after changing it while bot running
13 | STATUS_LIMIT = "10"
14 | DEFAULT_UPLOAD = "gd"
15 | STATUS_UPDATE_INTERVAL = "10"
16 | AUTO_DELETE_MESSAGE_DURATION = "30"
17 | EXTENSION_FILTER = ""
18 | YT_DLP_OPTIONS = ""
19 | USE_SERVICE_ACCOUNTS = "False"
20 |
21 | # GDrive Tools
22 | GDRIVE_ID = ""
23 | IS_TEAM_DRIVE = "False"
24 | STOP_DUPLICATE = "False"
25 | INDEX_URL = ""
26 |
27 | # Rclone
28 | RCLONE_PATH = ""
29 | RCLONE_FLAGS = ""
30 | RCLONE_SERVE_URL = ""
31 | RCLONE_SERVE_PORT = ""
32 | RCLONE_SERVE_USER = ""
33 | RCLONE_SERVE_PASS = ""
34 |
35 | # Update
36 | UPSTREAM_REPO = ""
37 | UPSTREAM_BRANCH = ""
38 |
39 | # Leech
40 | LEECH_SPLIT_SIZE = ""
41 | AS_DOCUMENT = "False"
42 | EQUAL_SPLITS = "False"
43 | LEECH_FILENAME_PREFIX = ""
44 | LEECH_DUMP_CHAT = ""
45 |
46 | # qBittorrent/Aria2c
47 | TORRENT_TIMEOUT = ""
48 | BASE_URL = ""
49 | BASE_URL_PORT = ""
50 | WEB_PINCODE = "False"
51 |
52 | # Queueing system
53 | QUEUE_ALL = ""
54 | QUEUE_DOWNLOAD = ""
55 | QUEUE_UPLOAD = ""
56 |
57 | # RSS
58 | RSS_DELAY = "600"
59 | RSS_CHAT = ""
60 |
61 | # Mega
62 | MEGA_EMAIL = ""
63 | MEGA_PASSWORD = ""
64 |
65 | # Torrent Search
66 | SEARCH_API_LINK = ""
67 | SEARCH_LIMIT = "0"
68 | SEARCH_PLUGINS = '["https://raw.githubusercontent.com/qbittorrent/search-plugins/master/nova3/engines/piratebay.py",
69 | "https://raw.githubusercontent.com/qbittorrent/search-plugins/master/nova3/engines/limetorrents.py",
70 | "https://raw.githubusercontent.com/qbittorrent/search-plugins/master/nova3/engines/torlock.py",
71 | "https://raw.githubusercontent.com/qbittorrent/search-plugins/master/nova3/engines/torrentscsv.py",
72 | "https://raw.githubusercontent.com/qbittorrent/search-plugins/master/nova3/engines/eztv.py",
73 | "https://raw.githubusercontent.com/qbittorrent/search-plugins/master/nova3/engines/torrentproject.py",
74 | "https://raw.githubusercontent.com/MaurizioRicci/qBittorrent_search_engines/master/kickass_torrent.py",
75 | "https://raw.githubusercontent.com/MaurizioRicci/qBittorrent_search_engines/master/yts_am.py",
76 | "https://raw.githubusercontent.com/MadeOfMagicAndWires/qBit-plugins/master/engines/linuxtracker.py",
77 | "https://raw.githubusercontent.com/MadeOfMagicAndWires/qBit-plugins/master/engines/nyaasi.py",
78 | "https://raw.githubusercontent.com/LightDestory/qBittorrent-Search-Plugins/master/src/engines/ettv.py",
79 | "https://raw.githubusercontent.com/LightDestory/qBittorrent-Search-Plugins/master/src/engines/glotorrents.py",
80 | "https://raw.githubusercontent.com/LightDestory/qBittorrent-Search-Plugins/master/src/engines/thepiratebay.py",
81 | "https://raw.githubusercontent.com/v1k45/1337x-qBittorrent-search-plugin/master/leetx.py",
82 | "https://raw.githubusercontent.com/nindogo/qbtSearchScripts/master/magnetdl.py",
83 | "https://raw.githubusercontent.com/msagca/qbittorrent_plugins/main/uniondht.py",
84 | "https://raw.githubusercontent.com/khensolomon/leyts/master/yts.py"]'
85 |
--------------------------------------------------------------------------------
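Every value in this file arrives in Python as a string, so USE_SERVICE_ACCOUNTS = "False" is truthy if used directly. A sketch of the explicit coercion a loader needs (the real parsing presumably lives in bot/__init__.py, which is not shown in this dump):

from os import environ

def env_bool(key, default=False):
    val = environ.get(key, '').strip()
    return val.lower() == 'true' if val else default

def env_int(key, default=0):
    val = environ.get(key, '').strip()
    return int(val) if val else default

STOP_DUPLICATE = env_bool('STOP_DUPLICATE')      # "False" -> False, not truthy
STATUS_LIMIT = env_int('STATUS_LIMIT', 10)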
/bot/modules/gd_search.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from swibots import CommandHandler, CallbackQueryHandler, regexp
3 |
4 | from bot import LOGGER, bot, user_data
5 | from bot.helper.mirror_utils.gdrive_utlis.search import gdSearch
6 | from bot.helper.switch_helper.message_utils import sendMessage, editMessage
7 | from bot.helper.switch_helper.filters import CustomFilters
8 | from bot.helper.switch_helper.bot_commands import BotCommands
9 | from bot.helper.switch_helper.button_build import ButtonMaker
10 | from bot.helper.ext_utils.bot_utils import sync_to_async, get_telegraph_list
11 |
12 |
13 | async def list_buttons(user_id, isRecursive=True, user_token=False):
14 | buttons = ButtonMaker()
15 | buttons.ibutton("Folders", f"list_types {user_id} folders {isRecursive} {user_token}")
16 | buttons.ibutton("Files", f"list_types {user_id} files {isRecursive} {user_token}")
17 | buttons.ibutton("Both", f"list_types {user_id} both {isRecursive} {user_token}")
18 | buttons.ibutton(f"Recursive: {isRecursive}",
19 | f"list_types {user_id} rec {isRecursive} {user_token}")
20 | buttons.ibutton(f"User Token: {user_token}",
21 | f"list_types {user_id} ut {isRecursive} {user_token}")
22 | buttons.ibutton("Cancel", f"list_types {user_id} cancel")
23 | return buttons.build_menu(2)
24 |
25 |
26 | async def _list_drive(key, message, item_type, isRecursive, user_token, user_id):
27 | LOGGER.info(f"listing: {key}")
28 | if user_token:
29 | user_dict = user_data.get(user_id, {})
30 | target_id = user_dict.get('gdrive_id', '') or ''
31 | LOGGER.info(target_id)
32 | else:
33 | target_id = ''
34 | telegraph_content, contents_no = await sync_to_async(gdSearch(isRecursive=isRecursive, itemType=item_type).drive_list, key, target_id, user_id)
35 | if telegraph_content:
36 | try:
37 | button = await get_telegraph_list(telegraph_content)
38 | except Exception as e:
39 | await editMessage(message, e)
40 | return
41 | msg = f"Found {contents_no} result for {key}"
42 | await editMessage(message, msg, button)
43 | else:
44 | await editMessage(message, f'No result found for {key}')
45 |
46 |
47 | async def select_type(ctx):
48 | data = ctx.event.callback_data.split()
49 | message = ctx.event.message
50 | user_id = ctx.event.action_by_id
51 | key = message.replied_to.message.split(maxsplit=1)[1].strip()
52 | if user_id != int(data[1]):
53 | await ctx.event.answer(text="Not Yours!", show_alert=True)
54 | return
55 | elif data[2] == 'rec':
56 | isRecursive = not bool(eval(data[3]))
57 | buttons = await list_buttons(user_id, isRecursive, eval(data[4]))
58 | return await editMessage(message, 'Choose list options:', buttons)
59 | elif data[2] == 'ut':
60 | user_token = not bool(eval(data[4]))
61 | buttons = await list_buttons(user_id, eval(data[3]), user_token)
62 | return await editMessage(message, 'Choose list options:', buttons)
63 | elif data[2] == 'cancel':
64 | return await editMessage(message, "list has been canceled!")
65 | item_type = data[2]
66 | isRecursive = eval(data[3])
67 | user_token = eval(data[4])
68 | await editMessage(message, f"Searching for {key}")
69 | await _list_drive(key, message, item_type, isRecursive, user_token, user_id)
70 |
71 |
72 | async def gdrive_search(ctx):
73 | message = ctx.event.message
74 | if len(message.message.split()) == 1:
75 | return await sendMessage(message, 'Send a search key along with command')
76 | user_id = ctx.event.action_by_id
77 | buttons = await list_buttons(user_id)
78 | await sendMessage(message, 'Choose list options:', buttons)
79 |
80 | bot.add_handler(CommandHandler(BotCommands.ListCommand, gdrive_search, filter=CustomFilters.authorized))
81 | bot.add_handler(CallbackQueryHandler(select_type, filter=regexp("^list_types")))
82 |
--------------------------------------------------------------------------------
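select_type above recovers the booleans it packed into callback data with eval(). Since callback data ultimately comes from the client, comparing against literal strings is the safer round-trip; a sketch:

def parse_flag(token):
    return token == 'True'  # only the exact literal produced by the buttons

data = 'list_types 12345 rec True False'.split()
isRecursive, user_token = parse_flag(data[3]), parse_flag(data[4])
assert (isRecursive, user_token) == (True, False)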
/bot/modules/eval.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from swibots import CommandHandler
3 | from os import path as ospath, getcwd, chdir
4 | from traceback import format_exc
5 | from textwrap import indent
6 | from io import StringIO, BytesIO
7 | from contextlib import redirect_stdout
8 |
9 | from bot import LOGGER, bot
10 | from bot.helper.switch_helper.filters import CustomFilters
11 | from bot.helper.switch_helper.bot_commands import BotCommands
12 | from bot.helper.switch_helper.message_utils import sendFile, sendMessage
13 | from bot.helper.ext_utils.bot_utils import sync_to_async
14 |
15 | namespaces = {}
16 |
17 |
18 | def namespace_of(message):
19 | chat = message.user or message.group
20 | chat_id = chat.id
21 | if chat_id not in namespaces:
22 | namespaces[chat_id] = {
23 | '__builtins__': globals()['__builtins__'],
24 | 'bot': bot,
25 | 'message': message,
26 | 'user': message.user,
27 | 'chat': chat}
28 |
29 | return namespaces[chat_id]
30 |
31 |
32 | def log_input(message):
33 | chat_id = message.user_id or message.group_id
34 | LOGGER.info(
35 | f"IN: {message.message} (user={message.user_id}, chat={chat_id})")
36 |
37 |
38 | async def send(msg, message):
39 | if len(str(msg)) > 2000:
40 |         with BytesIO(str(msg).encode()) as out_file:
41 | await sendFile(message, out_file, "output.txt")
42 | else:
43 | LOGGER.info(f"OUT: '{msg}'")
44 | await sendMessage(message, f"{msg}")
45 |
46 |
47 | async def evaluate(ctx):
48 | message = ctx.event.message
49 | await send(await sync_to_async(do, eval, message), message)
50 |
51 |
52 | async def execute(ctx):
53 | message = ctx.event.message
54 | await send(await sync_to_async(do, exec, message), message)
55 |
56 |
57 | def cleanup_code(code):
58 | if code.startswith('```') and code.endswith('```'):
59 | return '\n'.join(code.split('\n')[1:-1])
60 | return code.strip('` \n')
61 |
62 |
63 | def do(func, message):
64 | log_input(message)
65 | content = message.message.split(maxsplit=1)[-1]
66 | body = cleanup_code(content)
67 | env = namespace_of(message)
68 |
69 | chdir(getcwd())
70 | with open(ospath.join(getcwd(), 'bot/modules/temp.txt'), 'w') as temp:
71 | temp.write(body)
72 |
73 | stdout = StringIO()
74 |
75 | to_compile = f'def func():\n{indent(body, " ")}'
76 |
77 | try:
78 | exec(to_compile, env)
79 | except Exception as e:
80 | return f'{e.__class__.__name__}: {e}'
81 |
82 | func = env['func']
83 |
84 | try:
85 | with redirect_stdout(stdout):
86 | func_return = func()
87 | except Exception as e:
88 | value = stdout.getvalue()
89 | return f'{value}{format_exc()}'
90 | else:
91 | value = stdout.getvalue()
92 | result = None
93 | if func_return is None:
94 | if value:
95 | result = f'{value}'
96 | else:
97 | try:
98 | result = f'{repr(eval(body, env))}'
99 | except:
100 | pass
101 | else:
102 | result = f'{value}{func_return}'
103 | if result:
104 | return result
105 |
106 |
107 | async def clear(ctx):
108 | message = ctx.event.message
109 | chat_id = message.user_id or message.group_id
110 | log_input(message)
111 | global namespaces
112 | if chat_id in namespaces:
113 | del namespaces[chat_id]
114 | await send("Locals Cleared.", message)
115 |
116 |
117 | bot.add_handler(CommandHandler(BotCommands.EvalCommand,
118 | evaluate, filter=CustomFilters.owner))
119 | bot.add_handler(CommandHandler(BotCommands.ExecCommand,
120 | execute, filter=CustomFilters.owner))
121 | bot.add_handler(CommandHandler(BotCommands.ClearLocalsCommand,
122 | clear, filter=CustomFilters.owner))
123 |
--------------------------------------------------------------------------------
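The core trick in do() above is wrapping arbitrary user statements in a synthetic function so `return` is legal, then capturing print output with redirect_stdout. Isolated, it looks like this:

from contextlib import redirect_stdout
from io import StringIO
from textwrap import indent

def run_snippet(body, env=None):
    env = env if env is not None else {}
    exec(f'def func():\n{indent(body, "    ")}', env)  # wrap so `return` is legal
    buf = StringIO()
    with redirect_stdout(buf):                         # capture print() output
        ret = env['func']()
    return buf.getvalue(), ret

out, ret = run_snippet('print("hi")\nreturn 41 + 1')
assert out == 'hi\n' and ret == 42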
/bot/modules/authorize.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from swibots import CommandHandler
3 |
4 | from bot import user_data, DATABASE_URL, bot
5 | from bot.helper.switch_helper.message_utils import sendMessage
6 | from bot.helper.switch_helper.filters import CustomFilters
7 | from bot.helper.switch_helper.bot_commands import BotCommands
8 | from bot.helper.ext_utils.db_handler import DbManger
9 | from bot.helper.ext_utils.bot_utils import update_user_ldata
10 |
11 |
12 | async def authorize(ctx):
13 | message = ctx.event.message
14 | msg = message.message.split()
15 | if len(msg) > 1:
16 | id_ = msg[1].strip()
17 | try:
18 | id_ = int(id_)
19 | except:
20 | pass
21 | elif reply_to := message.replied_to:
22 | id_ = reply_to.user_id
23 | else:
24 | id_ = message.community_id
25 |     if not id_:
26 | return
27 | if id_ in user_data and user_data[id_].get('is_auth'):
28 | msg = 'Already Authorized!'
29 | else:
30 | update_user_ldata(id_, 'is_auth', True)
31 | if DATABASE_URL:
32 | await DbManger().update_user_data(id_)
33 | msg = 'Authorized'
34 | await sendMessage(message, msg)
35 |
36 |
37 | async def unauthorize(ctx):
38 | message = ctx.event.message
39 | msg = message.message.split()
40 | if len(msg) > 1:
41 | id_ = msg[1].strip()
42 | try:
43 | id_ = int(id_)
44 | except:
45 | pass
46 | elif reply_to := message.replied_to:
47 | id_ = reply_to.user_id
48 | else:
49 | id_ = message.community_id
50 |     if not id_:
51 | return
52 | if id_ not in user_data or user_data[id_].get('is_auth'):
53 | update_user_ldata(id_, 'is_auth', False)
54 | if DATABASE_URL:
55 | await DbManger().update_user_data(id_)
56 | msg = 'Unauthorized'
57 | else:
58 | msg = 'Already Unauthorized!'
59 | await sendMessage(message, msg)
60 |
61 |
62 | async def addSudo(ctx):
63 | id_ = ""
64 | message = ctx.event.message
65 | msg = message.message.split()
66 | if len(msg) > 1:
67 | id_ = msg[1].strip()
68 | try:
69 | id_ = int(id_)
70 | except:
71 | pass
72 | elif reply_to := message.replied_to:
73 | id_ = reply_to.user_id
74 | if id_:
75 | if id_ in user_data and user_data[id_].get('is_sudo'):
76 | msg = 'Already Sudo!'
77 | else:
78 | update_user_ldata(id_, 'is_sudo', True)
79 | if DATABASE_URL:
80 | await DbManger().update_user_data(id_)
81 | msg = 'Promoted as Sudo'
82 | else:
83 | msg = "Give ID or Reply To message of whom you want to Promote."
84 | await sendMessage(message, msg)
85 |
86 |
87 | async def removeSudo(ctx):
88 | id_ = ""
89 | message = ctx.event.message
90 | msg = message.message.split()
91 | if len(msg) > 1:
92 | id_ = msg[1].strip()
93 | try:
94 | id_ = int(id_)
95 | except:
96 | pass
97 | elif reply_to := message.replied_to:
98 | id_ = reply_to.user_id
99 |     if id_ and (id_ not in user_data or user_data[id_].get('is_sudo')):
100 | update_user_ldata(id_, 'is_sudo', False)
101 | if DATABASE_URL:
102 | await DbManger().update_user_data(id_)
103 | msg = 'Demoted'
104 | else:
105 | msg = "Give ID or Reply To message of whom you want to remove from Sudo"
106 | await sendMessage(message, msg)
107 |
108 | bot.add_handler(CommandHandler(BotCommands.AuthorizeCommand,
109 | authorize, filter=CustomFilters.sudo))
110 | bot.add_handler(CommandHandler(BotCommands.UnAuthorizeCommand,
111 | unauthorize, filter=CustomFilters.sudo))
112 | bot.add_handler(CommandHandler(BotCommands.AddSudoCommand,
113 | addSudo, filter=CustomFilters.sudo))
114 | bot.add_handler(CommandHandler(BotCommands.RmSudoCommand,
115 | removeSudo, filter=CustomFilters.sudo))
116 |
--------------------------------------------------------------------------------
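The parenthesization in removeSudo matters because `and` binds tighter than `or`; without it, an empty id_ falls through to user_data[''] and raises KeyError. A two-line demonstration:

user_data = {}
id_ = ''
# unparenthesised: (id_ and id_ not in user_data) or user_data[id_]...  -> KeyError
safe = bool(id_) and (id_ not in user_data or user_data[id_].get('is_sudo'))
assert safe is False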
/bot/modules/cancel_mirror.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from asyncio import sleep
3 | from swibots import CommandHandler, CallbackQueryHandler, regexp
4 |
5 | from bot import download_dict, bot, download_dict_lock, OWNER_ID, user_data
6 | from bot.helper.switch_helper.bot_commands import BotCommands
7 | from bot.helper.switch_helper.filters import CustomFilters
8 | from bot.helper.switch_helper.message_utils import sendMessage, auto_delete_message, deleteMessage
9 | from bot.helper.ext_utils.bot_utils import getDownloadByGid, getAllDownload, MirrorStatus
10 | from bot.helper.switch_helper import button_build
11 |
12 |
13 | async def cancel_mirror(ctx):
14 | message = ctx.event.message
15 | user_id = message.user_id
16 | msg = message.message.split()
17 | if len(msg) > 1:
18 | gid = msg[1]
19 | dl = await getDownloadByGid(gid)
20 | if dl is None:
21 | await sendMessage(message, f"GID: {gid} Not Found.")
22 | return
23 | elif reply_to_id := message.replied_to_id:
24 | async with download_dict_lock:
25 | dl = download_dict.get(reply_to_id, None)
26 | if dl is None:
27 | await sendMessage(message, "This is not an active task!")
28 | return
29 | elif len(msg) == 1:
30 | msg = "Reply to an active Command message which was used to start the download" \
31 | f" or send /{BotCommands.CancelMirror} GID to cancel it!"
32 | await sendMessage(message, msg)
33 | return
34 | if OWNER_ID != user_id and dl.message.user_id != user_id and \
35 | (user_id not in user_data or not user_data[user_id].get('is_sudo')):
36 | await sendMessage(message, "This task is not for you!")
37 | return
38 | obj = dl.download()
39 | await obj.cancel_download()
40 |
41 |
42 | async def cancel_all(status):
43 | matches = await getAllDownload(status)
44 | if not matches:
45 | return False
46 | for dl in matches:
47 | obj = dl.download()
48 | await obj.cancel_download()
49 | await sleep(1)
50 | return True
51 |
52 |
53 | async def cancel_all_buttons(ctx):
54 | message = ctx.event.message
55 | async with download_dict_lock:
56 | count = len(download_dict)
57 | if count == 0:
58 | await sendMessage(message, "No active tasks!")
59 | return
60 | buttons = button_build.ButtonMaker()
61 | buttons.ibutton("Downloading", f"canall {MirrorStatus.STATUS_DOWNLOADING}")
62 | buttons.ibutton("Uploading", f"canall {MirrorStatus.STATUS_UPLOADING}")
63 | buttons.ibutton("Seeding", f"canall {MirrorStatus.STATUS_SEEDING}")
64 | buttons.ibutton("Cloning", f"canall {MirrorStatus.STATUS_CLONING}")
65 | buttons.ibutton("Extracting", f"canall {MirrorStatus.STATUS_EXTRACTING}")
66 | buttons.ibutton("Archiving", f"canall {MirrorStatus.STATUS_ARCHIVING}")
67 | buttons.ibutton("QueuedDl", f"canall {MirrorStatus.STATUS_QUEUEDL}")
68 | buttons.ibutton("QueuedUp", f"canall {MirrorStatus.STATUS_QUEUEUP}")
69 | buttons.ibutton("Paused", f"canall {MirrorStatus.STATUS_PAUSED}")
70 | buttons.ibutton("All", "canall all")
71 | buttons.ibutton("Close", "canall close")
72 | button = buttons.build_menu(2)
73 | can_msg = await sendMessage(message, 'Choose tasks to cancel.', button)
74 | await auto_delete_message(message, can_msg)
75 |
76 |
77 | async def cancel_all_update(ctx):
78 | data = ctx.event.callback_data.split()
79 | message = ctx.event.message
80 | reply_to = message.replied_to
81 | if data[1] == 'close':
82 | await deleteMessage(reply_to)
83 | await deleteMessage(message)
84 | else:
85 | res = await cancel_all(data[1])
86 | if not res:
87 | await sendMessage(reply_to, f"No matching tasks for {data[1]}!")
88 |
89 |
90 | bot.add_handler(CommandHandler(BotCommands.CancelMirror,
91 | cancel_mirror, filter=CustomFilters.authorized))
92 | bot.add_handler(CommandHandler(BotCommands.CancelAllCommand,
93 |                                cancel_all_buttons, filter=CustomFilters.sudo))
94 | bot.add_handler(CallbackQueryHandler(cancel_all_update,
95 | filter=regexp("^canall") & CustomFilters.sudo))
96 |
--------------------------------------------------------------------------------
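build_menu(2) above lays the buttons out two per row. The chunking itself is independent of swibots (which, presumably, takes rows of buttons for its inline markup); a generic sketch with illustrative button tuples:

def build_menu(buttons, cols=2):
    # chunk a flat list of (label, callback_data) pairs into rows of `cols`
    return [buttons[i:i + cols] for i in range(0, len(buttons), cols)]

rows = build_menu([('Downloading', 'canall dl'), ('Uploading', 'canall up'),
                   ('All', 'canall all'), ('Close', 'canall close')])
assert len(rows) == 2 and len(rows[0]) == 2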
/bot/helper/switch_helper/message_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from asyncio import sleep
3 | from time import time
4 |
5 | from bot import config_dict, LOGGER, status_reply_dict, status_reply_dict_lock, Interval, bot, download_dict_lock
6 | from bot.helper.ext_utils.bot_utils import get_readable_message, setInterval, sync_to_async
7 |
8 |
9 | async def sendMessage(message, text, buttons=None):
10 | try:
11 | return await message.reply_text(text, inline_markup=buttons)
12 | except Exception as e:
13 | LOGGER.error(str(e))
14 | return str(e)
15 |
16 |
17 | async def editMessage(message, text, buttons=None):
18 | try:
19 | return await message.edit_text(text, inline_markup=buttons)
20 | except Exception as e:
21 | LOGGER.error(str(e))
22 | return str(e)
23 |
24 |
25 | async def sendFile(msg, file, description=''):
26 | try:
27 | return await msg.reply_media(description, file, description=description)
28 | except Exception as e:
29 | LOGGER.error(str(e))
30 | return str(e)
31 |
32 |
33 | async def sendRss(text):
34 | RC = config_dict['RSS_CHAT']
35 | if '|' in RC:
36 |         community_id, group_id = RC.split('|')
37 | receiver_id = None
38 | else:
39 | receiver_id = int(RC)
40 |         community_id, group_id = None, None
41 | try:
42 |         return await bot.send_message(text, community_id=community_id, group_id=group_id, user_id=receiver_id)
43 | except Exception as e:
44 | LOGGER.error(str(e))
45 | return str(e)
46 |
47 |
48 | async def deleteMessage(message):
49 | try:
50 | await message.delete()
51 | except Exception as e:
52 | LOGGER.error(str(e))
53 |
54 |
55 | async def auto_delete_message(cmd_message=None, bot_message=None):
56 | if config_dict['AUTO_DELETE_MESSAGE_DURATION'] != -1:
57 | await sleep(config_dict['AUTO_DELETE_MESSAGE_DURATION'])
58 | if cmd_message is not None:
59 | await deleteMessage(cmd_message)
60 | if bot_message is not None:
61 | await deleteMessage(bot_message)
62 |
63 |
64 | async def delete_all_messages():
65 | async with status_reply_dict_lock:
66 | for key, data in list(status_reply_dict.items()):
67 | try:
68 | del status_reply_dict[key]
69 | await deleteMessage(data[0])
70 | except Exception as e:
71 | LOGGER.error(str(e))
72 |
73 |
74 | async def update_all_messages(force=False):
75 | async with status_reply_dict_lock:
76 | if not status_reply_dict or not Interval or (not force and time() - list(status_reply_dict.values())[0][1] < 3):
77 | return
78 | for chat_id in list(status_reply_dict.keys()):
79 | status_reply_dict[chat_id][1] = time()
80 | async with download_dict_lock:
81 | msg, buttons = await sync_to_async(get_readable_message)
82 | if msg is None:
83 | return
84 | async with status_reply_dict_lock:
85 | for chat_id in list(status_reply_dict.keys()):
86 | if status_reply_dict[chat_id] and msg != status_reply_dict[chat_id][0].message:
87 | rmsg = await editMessage(status_reply_dict[chat_id][0], msg, buttons)
88 | if isinstance(rmsg, str):
89 | del status_reply_dict[chat_id]
90 | continue
91 | status_reply_dict[chat_id][0].message = msg
92 | status_reply_dict[chat_id][1] = time()
93 |
94 |
95 | async def sendStatusMessage(msg):
96 | async with download_dict_lock:
97 | progress, buttons = await sync_to_async(get_readable_message)
98 | if progress is None:
99 | return
100 | async with status_reply_dict_lock:
101 | chat_id = msg.receiver_id or msg.group_id
102 | if chat_id in list(status_reply_dict.keys()):
103 | message = status_reply_dict[chat_id][0]
104 | await deleteMessage(message)
105 | del status_reply_dict[chat_id]
106 | message = await sendMessage(msg, progress, buttons)
107 | message.message = progress
108 | status_reply_dict[chat_id] = [message, time()]
109 | if not Interval:
110 | Interval.append(setInterval(
111 | config_dict['STATUS_UPDATE_INTERVAL'], update_all_messages))
112 |
--------------------------------------------------------------------------------
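sendStatusMessage schedules update_all_messages through setInterval, which is defined in bot_utils and not shown in this dump. A plausible asyncio-based shape for it, offered as an assumption about that helper rather than its actual code:

from asyncio import create_task, sleep

class SetInterval:
    def __init__(self, interval, action):
        # must be constructed inside a running event loop
        self.__task = create_task(self.__loop(interval, action))

    async def __loop(self, interval, action):
        while True:
            await sleep(interval)
            await action()          # e.g. update_all_messages

    def cancel(self):
        self.__task.cancel()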
/bot/helper/mirror_utils/status_utils/aria2_status.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from time import time
3 |
4 | from bot import aria2, LOGGER
5 | from bot.helper.ext_utils.bot_utils import MirrorStatus, get_readable_time, sync_to_async
6 |
7 |
8 | def get_download(gid):
9 | try:
10 | return aria2.get_download(gid)
11 | except Exception as e:
12 | LOGGER.error(f'{e}: Aria2c, Error while getting torrent info')
13 | return None
14 |
15 |
16 | class Aria2Status:
17 |
18 | def __init__(self, gid, listener, seeding=False, queued=False):
19 | self.__gid = gid
20 | self.__download = get_download(gid)
21 | self.__listener = listener
22 | self.queued = queued
23 | self.start_time = 0
24 | self.seeding = seeding
25 | self.message = listener.message
26 |
27 | def __update(self):
28 | if self.__download is None:
29 | self.__download = get_download(self.__gid)
30 | else:
31 | self.__download = self.__download.live
32 | if self.__download.followed_by_ids:
33 | self.__gid = self.__download.followed_by_ids[0]
34 | self.__download = get_download(self.__gid)
35 |
36 | def progress(self):
37 | return self.__download.progress_string()
38 |
39 | def processed_bytes(self):
40 | return self.__download.completed_length_string()
41 |
42 | def speed(self):
43 | return self.__download.download_speed_string()
44 |
45 | def name(self):
46 | return self.__download.name
47 |
48 | def size(self):
49 | return self.__download.total_length_string()
50 |
51 | def eta(self):
52 | return self.__download.eta_string()
53 |
54 | def status(self):
55 | self.__update()
56 | if self.__download.is_waiting or self.queued:
57 | if self.seeding:
58 | return MirrorStatus.STATUS_QUEUEUP
59 | else:
60 | return MirrorStatus.STATUS_QUEUEDL
61 | elif self.__download.is_paused:
62 | return MirrorStatus.STATUS_PAUSED
63 | elif self.__download.seeder and self.seeding:
64 | return MirrorStatus.STATUS_SEEDING
65 | else:
66 | return MirrorStatus.STATUS_DOWNLOADING
67 |
68 | def seeders_num(self):
69 | return self.__download.num_seeders
70 |
71 | def leechers_num(self):
72 | return self.__download.connections
73 |
74 | def uploaded_bytes(self):
75 | return self.__download.upload_length_string()
76 |
77 | def upload_speed(self):
78 | self.__update()
79 | return self.__download.upload_speed_string()
80 |
81 | def ratio(self):
82 | return f"{round(self.__download.upload_length / self.__download.completed_length, 3)}"
83 |
84 | def seeding_time(self):
85 | return get_readable_time(time() - self.start_time)
86 |
87 | def download(self):
88 | return self
89 |
90 | def listener(self):
91 | return self.__listener
92 |
93 | def gid(self):
94 | self.__update()
95 | return self.__gid
96 |
97 | async def cancel_download(self):
98 |         # run the blocking aria2 RPC refresh off the event loop
99 |         await sync_to_async(self.__update)
100 | if self.__download.seeder and self.seeding:
101 | LOGGER.info(f"Cancelling Seed: {self.name()}")
102 | await self.__listener.onUploadError(f"Seeding stopped with Ratio: {self.ratio()} and Time: {self.seeding_time()}")
103 | await sync_to_async(aria2.remove, [self.__download], force=True, files=True)
104 | elif downloads := self.__download.followed_by:
105 | LOGGER.info(f"Cancelling Download: {self.name()}")
106 | await self.__listener.onDownloadError('Download cancelled by user!')
107 | downloads.append(self.__download)
108 | await sync_to_async(aria2.remove, downloads, force=True, files=True)
109 | else:
110 | if self.queued:
111 | LOGGER.info(f'Cancelling QueueDl: {self.name()}')
112 |             msg = 'Task has been removed from queue/download'
113 | else:
114 | LOGGER.info(f"Cancelling Download: {self.name()}")
115 | msg = 'Download stopped by user!'
116 | await self.__listener.onDownloadError(msg)
117 | await sync_to_async(aria2.remove, [self.__download], force=True, files=True)
118 |
--------------------------------------------------------------------------------
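The __update method above chases followed_by_ids because aria2 hands a magnet off to a new GID once its metadata resolves. A sketch of that resolution as a standalone helper over an aria2p API object:

def resolve_gid(api, gid):
    # follow the magnet -> torrent hand-off until we reach the real download
    download = api.get_download(gid)
    while download.followed_by_ids:
        gid = download.followed_by_ids[0]
        download = api.get_download(gid)
    return gid, download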
/bot/helper/mirror_utils/status_utils/qbit_status.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from asyncio import sleep
3 |
4 | from bot import LOGGER, get_client, QbTorrents, qb_listener_lock
5 | from bot.helper.ext_utils.bot_utils import MirrorStatus, get_readable_file_size, get_readable_time, sync_to_async
6 |
7 |
8 | def get_download(client, tag):
9 | try:
10 | return client.torrents_info(tag=tag)[0]
11 | except Exception as e:
12 | LOGGER.error(
13 | f'{e}: Qbittorrent, while getting torrent info. Tag: {tag}')
14 | return None
15 |
16 |
17 | class QbittorrentStatus:
18 |
19 | def __init__(self, listener, seeding=False, queued=False):
20 | self.__client = get_client()
21 | self.__listener = listener
22 | self.__info = get_download(self.__client, f'{self.__listener.uid}')
23 | self.queued = queued
24 | self.seeding = seeding
25 | self.message = listener.message
26 |
27 | def __update(self):
28 | new_info = get_download(self.__client, f'{self.__listener.uid}')
29 | if new_info is not None:
30 | self.__info = new_info
31 |
32 | def progress(self):
33 | return f'{round(self.__info.progress*100, 2)}%'
34 |
35 | def processed_bytes(self):
36 | return get_readable_file_size(self.__info.downloaded)
37 |
38 | def speed(self):
39 | return f"{get_readable_file_size(self.__info.dlspeed)}/s"
40 |
41 | def name(self):
42 | if self.__info.state in ["metaDL", "checkingResumeData"]:
43 | return f"[METADATA]{self.__info.name}"
44 | else:
45 | return self.__info.name
46 |
47 | def size(self):
48 | return get_readable_file_size(self.__info.size)
49 |
50 | def eta(self):
51 | return get_readable_time(self.__info.eta)
52 |
53 | def status(self):
54 | self.__update()
55 | state = self.__info.state
56 | if state == "queuedDL" or self.queued:
57 | return MirrorStatus.STATUS_QUEUEDL
58 | elif state == "queuedUP":
59 | return MirrorStatus.STATUS_QUEUEUP
60 | elif state in ["pausedDL", "pausedUP"]:
61 | return MirrorStatus.STATUS_PAUSED
62 | elif state in ["checkingUP", "checkingDL"]:
63 | return MirrorStatus.STATUS_CHECKING
64 | elif state in ["stalledUP", "uploading"] and self.seeding:
65 | return MirrorStatus.STATUS_SEEDING
66 | else:
67 | return MirrorStatus.STATUS_DOWNLOADING
68 |
69 | def seeders_num(self):
70 | return self.__info.num_seeds
71 |
72 | def leechers_num(self):
73 | return self.__info.num_leechs
74 |
75 | def uploaded_bytes(self):
76 | return get_readable_file_size(self.__info.uploaded)
77 |
78 | def upload_speed(self):
79 | return f"{get_readable_file_size(self.__info.upspeed)}/s"
80 |
81 | def ratio(self):
82 | return f"{round(self.__info.ratio, 3)}"
83 |
84 | def seeding_time(self):
85 | return get_readable_time(self.__info.seeding_time)
86 |
87 | def download(self):
88 | return self
89 |
90 | def gid(self):
91 | return self.hash()[:12]
92 |
93 | def hash(self):
94 | self.__update()
95 | return self.__info.hash
96 |
97 | def client(self):
98 | return self.__client
99 |
100 | def listener(self):
101 | return self.__listener
102 |
103 | async def cancel_download(self):
104 | self.__update()
105 | await sync_to_async(self.__client.torrents_pause, torrent_hashes=self.__info.hash)
106 | if not self.seeding:
107 | if self.queued:
108 | LOGGER.info(f'Cancelling QueueDL: {self.name()}')
109 |             msg = 'Task has been removed from queue/download'
110 | else:
111 | LOGGER.info(f"Cancelling Download: {self.__info.name}")
112 | msg = 'Download stopped by user!'
113 | await sleep(0.3)
114 | await self.__listener.onDownloadError(msg)
115 | await sync_to_async(self.__client.torrents_delete, torrent_hashes=self.__info.hash, delete_files=True)
116 | await sync_to_async(self.__client.torrents_delete_tags, tags=self.__info.tags)
117 | async with qb_listener_lock:
118 | if self.__info.tags in QbTorrents:
119 | del QbTorrents[self.__info.tags]
120 |
--------------------------------------------------------------------------------
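QbittorrentStatus never stores a torrent hash up front; it re-finds the torrent through the tag set at add time (the listener uid). The same pattern with qbittorrent-api directly (host, magnet, and tag values are illustrative):

from qbittorrentapi import Client

client = Client(host='localhost:8090')                             # illustrative host
client.torrents_add(urls='magnet:?xt=urn:btih:...', tags='12345')  # tag = listener uid
torrents = client.torrents_info(tag='12345')                       # later: find it by tag
if torrents:
    print(torrents[0].state, torrents[0].progress)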
/bot/helper/mirror_utils/download_utils/direct_link_generator_license.md:
--------------------------------------------------------------------------------
1 | RAPHIELSCAPE PUBLIC LICENSE
2 | Version 1.c, June 2019
3 |
4 | Copyright (C) 2019 Raphielscape LLC.
5 | Copyright (C) 2019 Devscapes Open Source Holding GmbH.
6 |
7 | Everyone is permitted to copy and distribute verbatim or modified
8 | copies of this license document, and changing it is allowed as long
9 | as the name is changed.
10 |
11 | RAPHIELSCAPE PUBLIC LICENSE
12 | A-1. DEFINITIONS
13 |
14 | 0. “This License” refers to version 1.c of the Raphielscape Public License.
15 |
16 | 1. “Copyright” also means copyright-like laws that apply to other kinds of works.
17 |
18 | 2. “The Work" refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”.
19 | “Licensees” and “recipients” may be individuals or organizations.
20 |
21 | 3. To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission,
22 | other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work
23 | or a work “based on” the earlier work.
24 |
25 | 4. Source Form. The “source form” for a work means the preferred form of the work for making modifications to it.
26 | “Object code” means any non-source form of a work.
27 |
28 | The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and
29 | (for an executable work) run the object code and to modify the work, including scripts to control those activities.
30 |
31 | The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source.
32 | The Corresponding Source for a work in source code form is that same work.
33 |
34 | 5. "The author" refers to "author" of the code, which is the one that made the particular code which exists inside of
35 | the Corresponding Source.
36 |
37 | 6. "Owner" refers to any parties which is made the early form of the Corresponding Source.
38 |
39 | A-2. TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
40 |
41 | 0. You must give any other recipients of the Work or Derivative Works a copy of this License; and
42 |
43 | 1. You must cause any modified files to carry prominent notices stating that You changed the files; and
44 |
45 | 2. You must retain, in the Source form of any Derivative Works that You distribute,
46 | this license, all copyright, patent, trademark, authorships and attribution notices
47 | from the Source form of the Work; and
48 |
49 | 3. Respecting the author and owner of works that are distributed in any way.
50 |
51 | You may add Your own copyright statement to Your modifications and may provide
52 | additional or different license terms and conditions for use, reproduction,
53 | or distribution of Your modifications, or for any such Derivative Works as a whole,
54 | provided Your use, reproduction, and distribution of the Work otherwise complies
55 | with the conditions stated in this License.
56 |
57 | B. DISCLAIMER OF WARRANTY
58 |
59 | THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR
60 | IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
61 | FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS
62 | BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
63 | (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
64 | OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
65 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
66 | OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
67 |
68 | C. REVISED VERSION OF THIS LICENSE
69 |
70 | The Devscapes Open Source Holding GmbH. may publish revised and/or new versions of the
71 | Raphielscape Public License from time to time. Such new versions will be similar in spirit
72 | to the present version, but may differ in detail to address new problems or concerns.
73 |
74 | Each version is given a distinguishing version number. If the Program specifies that a
75 | certain numbered version of the Raphielscape Public License "or any later version" applies to it,
76 | you have the option of following the terms and conditions either of that numbered version or of
77 | any later version published by the Devscapes Open Source Holding GmbH. If the Program does not specify a
78 | version number of the Raphielscape Public License, you may choose any version ever published
79 | by the Devscapes Open Source Holding GmbH.
80 |
81 | END OF LICENSE
82 |
--------------------------------------------------------------------------------
/web/nodes.py:
--------------------------------------------------------------------------------
1 | from anytree import NodeMixin
2 | from re import findall as re_findall
3 | from os import environ
4 |
5 | DOWNLOAD_DIR = environ.get('DOWNLOAD_DIR', '')
6 | if len(DOWNLOAD_DIR) == 0:
7 | DOWNLOAD_DIR = '/usr/src/app/downloads/'
8 | elif not DOWNLOAD_DIR.endswith("/"):
9 | DOWNLOAD_DIR += '/'
10 |
11 |
12 | class TorNode(NodeMixin):
13 | def __init__(self, name, is_folder=False, is_file=False, parent=None, size=None, priority=None, file_id=None, progress=None):
14 | super().__init__()
15 | self.name = name
16 | self.is_folder = is_folder
17 | self.is_file = is_file
18 |
19 | if parent is not None:
20 | self.parent = parent
21 | if size is not None:
22 | self.fsize = size
23 | if priority is not None:
24 | self.priority = priority
25 | if file_id is not None:
26 | self.file_id = file_id
27 | if progress is not None:
28 | self.progress = progress
29 |
30 |
31 | def qb_get_folders(path):
32 | return path.split("/")
33 |
34 | def get_folders(path):
35 | fs = re_findall(f'{DOWNLOAD_DIR}[0-9]+/(.+)', path)[0]
36 | return fs.split('/')
37 |
38 | def make_tree(res, aria2=False):
39 | parent = TorNode("Torrent")
40 | if not aria2:
41 | for i in res:
42 | folders = qb_get_folders(i.name)
43 | if len(folders) > 1:
44 | previous_node = parent
45 | for j in range(len(folders)-1):
46 | current_node = next((k for k in previous_node.children if k.name == folders[j]), None)
47 | if current_node is None:
48 | previous_node = TorNode(folders[j], parent=previous_node, is_folder=True)
49 | else:
50 | previous_node = current_node
51 | TorNode(folders[-1], is_file=True, parent=previous_node, size=i.size, priority=i.priority, \
52 | file_id=i.id, progress=round(i.progress*100, 5))
53 | else:
54 | TorNode(folders[-1], is_file=True, parent=parent, size=i.size, priority=i.priority, \
55 | file_id=i.id, progress=round(i.progress*100, 5))
56 | else:
57 | for i in res:
58 | folders = get_folders(i['path'])
59 | priority = 1
60 | if i['selected'] == 'false':
61 | priority = 0
62 | if len(folders) > 1:
63 | previous_node = parent
64 | for j in range(len(folders)-1):
65 | current_node = next((k for k in previous_node.children if k.name == folders[j]), None)
66 | if current_node is None:
67 | previous_node = TorNode(folders[j], parent=previous_node, is_folder=True)
68 | else:
69 | previous_node = current_node
70 | TorNode(folders[-1], is_file=True, parent=previous_node, size=i['length'], priority=priority, \
71 | file_id=i['index'], progress=round((int(i['completedLength'])/int(i['length']))*100, 5))
72 | else:
73 | TorNode(folders[-1], is_file=True, parent=parent, size=i['length'], priority=priority, \
74 | file_id=i['index'], progress=round((int(i['completedLength'])/int(i['length']))*100, 5))
75 | return create_list(parent, ["", 0])
76 |
77 | """
78 | def print_tree(parent):
79 | for pre, _, node in RenderTree(parent):
80 | treestr = u"%s%s" % (pre, node.name)
81 | print(treestr.ljust(8), node.is_folder, node.is_file)
82 | """
83 |
84 | def create_list(par, msg):
85 | if par.name != ".unwanted":
86 |         msg[0] += '<ul>'  # NOTE: HTML literals in this function were stripped from the dump; reconstructed from the upstream web UI markup
87 |     for i in par.children:
88 |         if i.is_folder:
89 |             msg[0] += '<li>'
90 |             if i.name != ".unwanted":
91 |                 msg[0] += f'<input type="checkbox" name="foldernode_{msg[1]}"> <label for="{i.name}">{i.name}</label>'
92 |             create_list(i, msg)
93 |             msg[0] += '</li>'
94 |             msg[1] += 1
95 |         else:
96 |             msg[0] += '<li>'
97 |             if i.priority == 0:
98 |                 msg[0] += f'<input type="checkbox" name="filenode_{i.file_id}" data-size="{i.fsize}"> <label data-size="{i.fsize}" for="filenode_{i.file_id}">{i.name}</label> / {i.progress}%'
99 |             else:
100 |                 msg[0] += f'<input type="checkbox" checked name="filenode_{i.file_id}" data-size="{i.fsize}"> <label data-size="{i.fsize}" for="filenode_{i.file_id}">{i.name}</label> / {i.progress}%'
101 |             msg[0] += f'<input type="hidden" value="off" name="filenode_{i.file_id}">'
102 |             msg[0] += '</li>'
103 |     if par.name != ".unwanted":
104 |         msg[0] += "</ul>"
105 | 
106 |     return msg
107 |
--------------------------------------------------------------------------------
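make_tree builds the anytree structure that create_list then serializes. For intuition, here is what such a tree looks like rendered with anytree's RenderTree (the helper the commented-out print_tree above uses), on toy data:

from anytree import Node, RenderTree

root = Node('Torrent')
season = Node('Season.1', parent=root)
Node('E01.mkv', parent=season)
Node('E02.mkv', parent=season)

for pre, _, node in RenderTree(root):
    print(f'{pre}{node.name}')
# Torrent
# └── Season.1
#     ├── E01.mkv
#     └── E02.mkv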
/bot/helper/ext_utils/task_manager.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from asyncio import Event
3 |
4 | from bot import config_dict, queued_dl, queued_up, non_queued_up, non_queued_dl, queue_dict_lock, LOGGER
5 | from bot.helper.mirror_utils.gdrive_utlis.search import gdSearch
6 | from bot.helper.ext_utils.fs_utils import get_base_name
7 | from bot.helper.ext_utils.bot_utils import sync_to_async, get_telegraph_list, is_gdrive_id
8 |
9 |
10 | async def stop_duplicate_check(name, listener):
11 | if (listener.upDest.startswith('mtp:') and not listener.user_dict.get('stop_duplicate', False)
12 |             or not config_dict['STOP_DUPLICATE'] and not listener.upDest.startswith('mtp:')
13 | or listener.isLeech
14 | or not is_gdrive_id(listener.upDest)
15 | or listener.select
16 | ):
17 | return False, None
18 | LOGGER.info(f'Checking File/Folder if already in Drive: {name}')
19 | if listener.compress:
20 | name = f"{name}.zip"
21 | elif listener.extract:
22 | try:
23 | name = get_base_name(name)
24 | except:
25 | name = None
26 | if name is not None:
27 | telegraph_content, contents_no = await sync_to_async(gdSearch(stopDup=True).drive_list, name, listener.upDest, listener.user_id)
28 | if telegraph_content:
29 | msg = f"File/Folder is already available in Drive.\nHere are {contents_no} list results:"
30 | button = await get_telegraph_list(telegraph_content)
31 | return msg, button
32 | return False, None
33 |
34 |
35 | async def is_queued(uid):
36 | all_limit = config_dict['QUEUE_ALL']
37 | dl_limit = config_dict['QUEUE_DOWNLOAD']
38 | event = None
39 | added_to_queue = False
40 | if all_limit or dl_limit:
41 | async with queue_dict_lock:
42 | dl = len(non_queued_dl)
43 | up = len(non_queued_up)
44 | if (all_limit and dl + up >= all_limit and (not dl_limit or dl >= dl_limit)) or (dl_limit and dl >= dl_limit):
45 | added_to_queue = True
46 | event = Event()
47 | queued_dl[uid] = event
48 | return added_to_queue, event
49 |
50 |
51 | async def start_dl_from_queued(uid):
52 | queued_dl[uid].set()
53 | del queued_dl[uid]
54 |
55 |
56 | async def start_up_from_queued(uid):
57 | queued_up[uid].set()
58 | del queued_up[uid]
59 |
60 |
61 | async def start_from_queued():
62 | if all_limit := config_dict['QUEUE_ALL']:
63 | dl_limit = config_dict['QUEUE_DOWNLOAD']
64 | up_limit = config_dict['QUEUE_UPLOAD']
65 | async with queue_dict_lock:
66 | dl = len(non_queued_dl)
67 | up = len(non_queued_up)
68 | all_ = dl + up
69 | if all_ < all_limit:
70 | f_tasks = all_limit - all_
71 | if queued_up and (not up_limit or up < up_limit):
72 | for index, uid in enumerate(list(queued_up.keys()), start=1):
73 | f_tasks = all_limit - all_
74 | await start_up_from_queued(uid)
75 | f_tasks -= 1
76 | if f_tasks == 0 or (up_limit and index >= up_limit - up):
77 | break
78 | if queued_dl and (not dl_limit or dl < dl_limit) and f_tasks != 0:
79 | for index, uid in enumerate(list(queued_dl.keys()), start=1):
80 | await start_dl_from_queued(uid)
81 | if (dl_limit and index >= dl_limit - dl) or index == f_tasks:
82 | break
83 | return
84 |
85 | if up_limit := config_dict['QUEUE_UPLOAD']:
86 | async with queue_dict_lock:
87 | up = len(non_queued_up)
88 | if queued_up and up < up_limit:
89 | f_tasks = up_limit - up
90 | for index, uid in enumerate(list(queued_up.keys()), start=1):
91 | await start_up_from_queued(uid)
92 | if index == f_tasks:
93 | break
94 | else:
95 | async with queue_dict_lock:
96 | if queued_up:
97 | for uid in list(queued_up.keys()):
98 | await start_up_from_queued(uid)
99 |
100 | if dl_limit := config_dict['QUEUE_DOWNLOAD']:
101 | async with queue_dict_lock:
102 | dl = len(non_queued_dl)
103 | if queued_dl and dl < dl_limit:
104 | f_tasks = dl_limit - dl
105 | for index, uid in enumerate(list(queued_dl.keys()), start=1):
106 | await start_dl_from_queued(uid)
107 | if index == f_tasks:
108 | break
109 | else:
110 | async with queue_dict_lock:
111 | if queued_dl:
112 | for uid in list(queued_dl.keys()):
113 | await start_dl_from_queued(uid)
114 |
--------------------------------------------------------------------------------
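is_queued and start_from_queued implement admission control with plain asyncio Events: an over-limit task parks on an Event, and a finishing task sets the next queued Event. Reduced to a single download slot, the mechanism looks like this (names and timings are illustrative):

from asyncio import Event, gather, run, sleep

queued, running = {}, set()
LIMIT = 1

async def task(uid):
    if len(running) >= LIMIT:                 # over the limit: park on an Event
        ev = queued[uid] = Event()
        await ev.wait()
    running.add(uid)
    await sleep(0.1)                          # stands in for the actual download
    running.discard(uid)
    if queued:                                # hand the slot to the next in line
        queued.pop(next(iter(queued))).set()

run(gather(task(1), task(2), task(3)))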
/bot/helper/mirror_utils/download_utils/switch_download.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from logging import getLogger, ERROR
3 | from time import time
4 | from asyncio import Lock
5 |
6 | from bot import LOGGER, download_dict, download_dict_lock, non_queued_dl, queue_dict_lock
7 | from bot.helper.mirror_utils.status_utils.switch_status import SwitchStatus
8 | from bot.helper.mirror_utils.status_utils.queue_status import QueueStatus
9 | from bot.helper.switch_helper.message_utils import sendStatusMessage, sendMessage
10 | from bot.helper.ext_utils.task_manager import is_queued, stop_duplicate_check
11 |
12 | global_lock = Lock()
13 | GLOBAL_GID = set()
14 | getLogger("swibots.app").setLevel(ERROR)
15 |
16 |
17 | class SwitchDownloadHelper:
18 |
19 | def __init__(self, listener):
20 | self.name = ""
21 | self.__processed_bytes = 0
22 | self.__start_time = time()
23 | self.__listener = listener
24 | self.__id = ""
25 | self.__is_cancelled = False
26 |
27 | @property
28 | def speed(self):
29 | return self.__processed_bytes / (time() - self.__start_time)
30 |
31 | @property
32 | def processed_bytes(self):
33 | return self.__processed_bytes
34 |
35 | async def __onDownloadStart(self, name, size, file_id, from_queue):
36 | async with global_lock:
37 | GLOBAL_GID.add(file_id)
38 | self.name = name
39 | self.__id = file_id
40 | async with download_dict_lock:
41 | download_dict[self.__listener.uid] = SwitchStatus(
42 | self, size, self.__listener.message, file_id[:12], 'dl')
43 | async with queue_dict_lock:
44 | non_queued_dl.add(self.__listener.uid)
45 | if not from_queue:
46 | await sendStatusMessage(self.__listener.message)
47 | LOGGER.info(f'Download from Switch: {name}')
48 | else:
49 | LOGGER.info(f'Start Queued Download from Switch: {name}')
50 |
51 | async def __onDownloadProgress(self, progress):
52 | if self.__is_cancelled:
53 | progress.client.cancel()
54 | self.__processed_bytes = progress.downloaded
55 |
56 | async def __onDownloadError(self, error):
57 | async with global_lock:
58 | try:
59 | GLOBAL_GID.remove(self.__id)
60 | except:
61 | pass
62 | await self.__listener.onDownloadError(error)
63 |
64 | async def __onDownloadComplete(self):
65 | await self.__listener.onDownloadComplete()
66 | async with global_lock:
67 | GLOBAL_GID.remove(self.__id)
68 |
69 | async def __download(self, message, path):
70 | try:
71 | download = await message.download(file_name=path, block=True, progress=self.__onDownloadProgress)
72 | if self.__is_cancelled:
73 | await self.__onDownloadError('Cancelled by user!')
74 | return
75 | except Exception as e:
76 | LOGGER.error(str(e))
77 | await self.__onDownloadError(str(e))
78 | return
79 | if download is not None:
80 | await self.__onDownloadComplete()
81 | elif not self.__is_cancelled:
82 | await self.__onDownloadError('Internal error occurred')
83 |
84 | async def add_download(self, message, path, filename):
85 | if message.is_media:
86 | media = message.media_info
87 | async with global_lock:
88 | download = media.source_id not in GLOBAL_GID
89 |
90 | if download:
91 | if not filename:
92 | name = media.description
93 | else:
94 | name = filename
95 | path = path + name
96 | size = media.file_size
97 | gid = media.source_id
98 |
99 | msg, button = await stop_duplicate_check(name, self.__listener)
100 | if msg:
101 | await sendMessage(self.__listener.message, msg, button)
102 | return
103 |
104 | added_to_queue, event = await is_queued(self.__listener.uid)
105 | if added_to_queue:
106 | LOGGER.info(f"Added to Queue/Download: {name}")
107 | async with download_dict_lock:
108 | download_dict[self.__listener.uid] = QueueStatus(
109 | name, size, gid, self.__listener, 'dl')
110 | await sendStatusMessage(self.__listener.message)
111 | await event.wait()
112 | async with download_dict_lock:
113 | if self.__listener.uid not in download_dict:
114 | return
115 | from_queue = True
116 | else:
117 | from_queue = False
118 | await self.__onDownloadStart(name, size, gid, from_queue)
119 | await self.__download(message, path)
120 | else:
121 | await self.__onDownloadError('File already being downloaded!')
122 | else:
123 | await self.__onDownloadError('No document in the replied message!')
124 |
125 | async def cancel_download(self):
126 | self.__is_cancelled = True
127 | LOGGER.info(
128 | f'Cancelling download on user request: name: {self.name} id: {self.__id}')
129 |
--------------------------------------------------------------------------------
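SwitchDownloadHelper above relies on a module-level asyncio.Lock plus a set of
active file IDs (GLOBAL_GID) so the same Switch file is never downloaded twice
at once. A minimal runnable sketch of that check-and-claim guard, with
illustrative names that are not part of the bot:

from asyncio import Lock, gather, run, sleep

_lock = Lock()       # serialises access to the ID set
_active_ids = set()  # file IDs currently being downloaded

async def download_once(file_id):
    # Check and claim atomically: without the lock, two concurrent callers
    # could both pass the membership test before either adds the ID.
    async with _lock:
        if file_id in _active_ids:
            return f'{file_id}: already being downloaded'
        _active_ids.add(file_id)
    try:
        await sleep(0.1)  # stand-in for the real transfer
        return f'{file_id}: done'
    finally:
        async with _lock:
            _active_ids.discard(file_id)

async def main():
    print(await gather(download_once('abc123'), download_once('abc123')))

run(main())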
/bot/helper/mirror_utils/download_utils/qbit_download.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from time import time
3 | from aiofiles.os import remove as aioremove, path as aiopath
4 |
5 | from bot import download_dict, download_dict_lock, get_client, LOGGER, config_dict, non_queued_dl, queue_dict_lock
6 | from bot.helper.mirror_utils.status_utils.qbit_status import QbittorrentStatus
7 | from bot.helper.switch_helper.message_utils import sendMessage, deleteMessage, sendStatusMessage
8 | from bot.helper.ext_utils.bot_utils import bt_selection_buttons, sync_to_async
9 | from bot.helper.listeners.qbit_listener import onDownloadStart
10 | from bot.helper.ext_utils.task_manager import is_queued
11 |
12 |
13 | """
14 | Only v1 torrents
15 | #from hashlib import sha1
16 | #from base64 import b16encode, b32decode
17 | #from bencoding import bencode, bdecode
18 | #from re import search as re_search
19 | def __get_hash_magnet(mgt: str):
20 | hash_ = re_search(r'(?<=xt=urn:btih:)[a-zA-Z0-9]+', mgt).group(0)
21 | if len(hash_) == 32:
22 | hash_ = b16encode(b32decode(hash_.upper())).decode()
23 | return str(hash_)
24 |
25 | def __get_hash_file(path):
26 | with open(path, "rb") as f:
27 | decodedDict = bdecode(f.read())
28 | hash_ = sha1(bencode(decodedDict[b'info'])).hexdigest()
29 | return str(hash_)
30 | """
31 |
32 |
33 | async def add_qb_torrent(link, path, listener, ratio, seed_time):
34 | client = await sync_to_async(get_client)
35 | ADD_TIME = time()
36 | try:
37 | url = link
38 | tpath = None
39 | if await aiopath.exists(link):
40 | url = None
41 | tpath = link
42 | added_to_queue, event = await is_queued(listener.uid)
43 | op = await sync_to_async(client.torrents_add, url, tpath, path, is_paused=added_to_queue, tags=f'{listener.uid}',
44 | ratio_limit=ratio, seeding_time_limit=seed_time, headers={'user-agent': 'Wget/1.12'})
45 | if op.lower() == "ok.":
46 | tor_info = await sync_to_async(client.torrents_info, tag=f'{listener.uid}')
47 | if len(tor_info) == 0:
48 | while True:
49 | tor_info = await sync_to_async(client.torrents_info, tag=f'{listener.uid}')
50 | if len(tor_info) > 0:
51 | break
52 | elif time() - ADD_TIME >= 120:
53 |                         msg = "Not added! Check if the link is valid. If it's a torrent file, report this; it can happen when the torrent file is larger than 10MB."
54 | await sendMessage(listener.message, msg)
55 | return
56 | tor_info = tor_info[0]
57 | ext_hash = tor_info.hash
58 | else:
59 |             await sendMessage(listener.message, "This torrent is already added, or the link/file is unsupported/invalid.")
60 | return
61 |
62 | async with download_dict_lock:
63 | download_dict[listener.uid] = QbittorrentStatus(
64 | listener, queued=added_to_queue)
65 | await onDownloadStart(f'{listener.uid}')
66 |
67 | if added_to_queue:
68 | LOGGER.info(
69 | f"Added to Queue/Download: {tor_info.name} - Hash: {ext_hash}")
70 | else:
71 | async with queue_dict_lock:
72 | non_queued_dl.add(listener.uid)
73 | LOGGER.info(
74 | f"QbitDownload started: {tor_info.name} - Hash: {ext_hash}")
75 |
76 | if config_dict['BASE_URL'] and listener.select:
77 | if link.startswith('magnet:'):
78 |                 metamsg = "Downloading metadata. Wait, then you can select files. Use a torrent file to avoid this wait."
79 | meta = await sendMessage(listener.message, metamsg)
80 | while True:
81 | tor_info = await sync_to_async(client.torrents_info, tag=f'{listener.uid}')
82 | if len(tor_info) == 0:
83 | await deleteMessage(meta)
84 | return
85 | try:
86 | tor_info = tor_info[0]
87 | if tor_info.state not in ["metaDL", "checkingResumeData", "pausedDL"]:
88 | await deleteMessage(meta)
89 | break
90 | except:
91 | await deleteMessage(meta)
92 | return
93 |
94 | ext_hash = tor_info.hash
95 | if not added_to_queue:
96 | await sync_to_async(client.torrents_pause, torrent_hashes=ext_hash)
97 | SBUTTONS = bt_selection_buttons(ext_hash)
98 |                     msg = "Your download is paused. Choose files, then press the Done Selecting button to start downloading."
99 | await sendMessage(listener.message, msg, SBUTTONS)
100 | else:
101 | await sendStatusMessage(listener.message)
102 |
103 | if added_to_queue:
104 | await event.wait()
105 |
106 | async with download_dict_lock:
107 | if listener.uid not in download_dict:
108 | return
109 | download_dict[listener.uid].queued = False
110 |
111 | await sync_to_async(client.torrents_resume, torrent_hashes=ext_hash)
112 | LOGGER.info(
113 | f'Start Queued Download from Qbittorrent: {tor_info.name} - Hash: {ext_hash}')
114 |
115 | async with queue_dict_lock:
116 | non_queued_dl.add(listener.uid)
117 | except Exception as e:
118 | await sendMessage(listener.message, str(e))
119 | finally:
120 | if await aiopath.exists(link):
121 | await aioremove(link)
122 |
--------------------------------------------------------------------------------
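add_qb_torrent waits for the freshly added torrent to appear under its tag,
giving up after 120 seconds; the source loop polls back-to-back, so a generic
version usually adds a small sleep between probes. A sketch of that
wait-until-or-timeout pattern (the predicate here is a stand-in, not bot code):

from asyncio import run, sleep
from time import time

async def wait_for(check, timeout=120, interval=1):
    # Poll an async predicate until it yields a truthy value or we time out,
    # mirroring the torrents_info loop above.
    start = time()
    while True:
        if result := await check():
            return result
        if time() - start >= timeout:
            return None  # caller reports "Not added!"
        await sleep(interval)

async def fake_torrents_info(state={'calls': 0}):
    # Stand-in predicate: empty twice, then returns a result.
    state['calls'] += 1
    return ['tor_info'] if state['calls'] >= 3 else []

async def main():
    print(await wait_for(fake_torrents_info, timeout=10))

run(main())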
/bot/helper/ext_utils/fs_utils.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from os import walk, path as ospath
3 | from aiofiles.os import remove as aioremove, path as aiopath, listdir, rmdir, makedirs
4 | from aioshutil import rmtree as aiormtree
5 | from shutil import rmtree
6 | from magic import Magic
7 | from re import split as re_split, I, search as re_search
8 | from subprocess import run as srun
9 | from sys import exit as sexit
10 |
11 | from .exceptions import NotSupportedExtractionArchive
12 | from bot import aria2, LOGGER, DOWNLOAD_DIR, get_client, GLOBAL_EXTENSION_FILTER
13 | from bot.helper.ext_utils.bot_utils import sync_to_async, cmd_exec
14 |
15 | ARCH_EXT = [".tar.bz2", ".tar.gz", ".bz2", ".gz", ".tar.xz", ".tar", ".tbz2", ".tgz", ".lzma2",
16 | ".zip", ".7z", ".z", ".rar", ".iso", ".wim", ".cab", ".apm", ".arj", ".chm",
17 | ".cpio", ".cramfs", ".deb", ".dmg", ".fat", ".hfs", ".lzh", ".lzma", ".mbr",
18 | ".msi", ".mslz", ".nsis", ".ntfs", ".rpm", ".squashfs", ".udf", ".vhd", ".xar"]
19 |
20 | FIRST_SPLIT_REGEX = r'(\.|_)part0*1\.rar$|(\.|_)7z\.0*1$|(\.|_)zip\.0*1$|^(?!.*(\.|_)part\d+\.rar$).*\.rar$'
21 |
22 | SPLIT_REGEX = r'\.r\d+$|\.7z\.\d+$|\.z\d+$|\.zip\.\d+$'
23 |
24 |
25 | def is_first_archive_split(file):
26 | return bool(re_search(FIRST_SPLIT_REGEX, file))
27 |
28 |
29 | def is_archive(file):
30 | return file.endswith(tuple(ARCH_EXT))
31 |
32 |
33 | def is_archive_split(file):
34 | return bool(re_search(SPLIT_REGEX, file))
35 |
36 |
37 | async def clean_target(path):
38 | if await aiopath.exists(path):
39 | LOGGER.info(f"Cleaning Target: {path}")
40 | if await aiopath.isdir(path):
41 | try:
42 | await aiormtree(path)
43 | except:
44 | pass
45 | elif await aiopath.isfile(path):
46 | try:
47 | await aioremove(path)
48 | except:
49 | pass
50 |
51 |
52 | async def clean_download(path):
53 | if await aiopath.exists(path):
54 | LOGGER.info(f"Cleaning Download: {path}")
55 | try:
56 | await aiormtree(path)
57 | except:
58 | pass
59 |
60 |
61 | async def start_cleanup():
62 | get_client().torrents_delete(torrent_hashes="all")
63 | try:
64 | await aiormtree(DOWNLOAD_DIR)
65 | except:
66 | pass
67 | await makedirs(DOWNLOAD_DIR)
68 |
69 |
70 | def clean_all():
71 | aria2.remove_all(True)
72 | get_client().torrents_delete(torrent_hashes="all")
73 | try:
74 | rmtree(DOWNLOAD_DIR)
75 | except:
76 | pass
77 |
78 |
79 | def exit_clean_up(signal, frame):
80 | try:
81 | LOGGER.info(
82 | "Please wait, while we clean up and stop the running downloads")
83 | clean_all()
84 | srun(['pkill', '-9', '-f', 'gunicorn|aria2c|qbittorrent-nox|ffmpeg'])
85 | sexit(0)
86 | except KeyboardInterrupt:
87 | LOGGER.warning("Force Exiting before the cleanup finishes!")
88 | sexit(1)
89 |
90 |
91 | async def clean_unwanted(path):
92 | LOGGER.info(f"Cleaning unwanted files/folders: {path}")
93 | for dirpath, _, files in await sync_to_async(walk, path, topdown=False):
94 | for filee in files:
95 |             if filee.endswith(".!qB") or (filee.endswith('.parts') and filee.startswith('.')):
96 | await aioremove(ospath.join(dirpath, filee))
97 | if dirpath.endswith((".unwanted", "splited_files_mltb", "copied_mltb")):
98 | await aiormtree(dirpath)
99 | for dirpath, _, files in await sync_to_async(walk, path, topdown=False):
100 | if not await listdir(dirpath):
101 | await rmdir(dirpath)
102 |
103 |
104 | async def get_path_size(path):
105 | if await aiopath.isfile(path):
106 | return await aiopath.getsize(path)
107 | total_size = 0
108 | for root, dirs, files in await sync_to_async(walk, path):
109 | for f in files:
110 | abs_path = ospath.join(root, f)
111 | total_size += await aiopath.getsize(abs_path)
112 | return total_size
113 |
114 |
115 | async def count_files_and_folders(path):
116 | total_files = 0
117 | total_folders = 0
118 | for _, dirs, files in await sync_to_async(walk, path):
119 | total_files += len(files)
120 | for f in files:
121 | if f.endswith(tuple(GLOBAL_EXTENSION_FILTER)):
122 | total_files -= 1
123 | total_folders += len(dirs)
124 | return total_folders, total_files
125 |
126 |
127 | def get_base_name(orig_path):
128 | extension = next(
129 | (ext for ext in ARCH_EXT if orig_path.lower().endswith(ext)), ''
130 | )
131 | if extension != '':
132 | return re_split(f'{extension}$', orig_path, maxsplit=1, flags=I)[0]
133 | else:
134 | raise NotSupportedExtractionArchive(
135 | 'File format not supported for extraction')
136 |
137 |
138 | def get_mime_type(file_path):
139 | mime = Magic(mime=True)
140 | mime_type = mime.from_file(file_path)
141 | mime_type = mime_type or "text/plain"
142 | return mime_type
143 |
144 |
145 | async def join_files(path):
146 | files = await listdir(path)
147 | results = []
148 | for file_ in files:
149 | if re_search(r"\.0+2$", file_) and await sync_to_async(get_mime_type, f'{path}/{file_}') == 'application/octet-stream':
150 | final_name = file_.rsplit('.', 1)[0]
151 | cmd = f'cat {path}/{final_name}.* > {path}/{final_name}'
152 | _, stderr, code = await cmd_exec(cmd, True)
153 | if code != 0:
154 | LOGGER.error(f'Failed to join {final_name}, stderr: {stderr}')
155 | else:
156 | results.append(final_name)
157 | if results:
158 | for res in results:
159 | for file_ in files:
160 | if re_search(fr"{res}\.0[0-9]+$", file_):
161 | await aioremove(f'{path}/{file_}')
162 |
--------------------------------------------------------------------------------
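FIRST_SPLIT_REGEX and SPLIT_REGEX above decide which archive name starts a
multi-part set and which names are follow-on volumes. A quick standalone check
of how those exact patterns classify typical file names:

from re import search

FIRST_SPLIT_REGEX = r'(\.|_)part0*1\.rar$|(\.|_)7z\.0*1$|(\.|_)zip\.0*1$|^(?!.*(\.|_)part\d+\.rar$).*\.rar$'
SPLIT_REGEX = r'\.r\d+$|\.7z\.\d+$|\.z\d+$|\.zip\.\d+$'

for name in ('movie.part01.rar', 'movie.part02.rar', 'backup.7z.001',
             'backup.7z.002', 'single.rar', 'data.zip.001'):
    print(f'{name:18} first_split={bool(search(FIRST_SPLIT_REGEX, name))} '
          f'split_volume={bool(search(SPLIT_REGEX, name))}')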
/bot/modules/torrent_select.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from swibots import CommandHandler, CallbackQueryHandler, regexp
3 | from aiofiles.os import remove as aioremove, path as aiopath
4 |
5 | from bot import bot, aria2, download_dict, download_dict_lock, OWNER_ID, user_data, LOGGER
6 | from bot.helper.switch_helper.bot_commands import BotCommands
7 | from bot.helper.switch_helper.filters import CustomFilters
8 | from bot.helper.switch_helper.message_utils import sendMessage, sendStatusMessage, deleteMessage
9 | from bot.helper.ext_utils.bot_utils import getDownloadByGid, MirrorStatus, bt_selection_buttons, sync_to_async
10 |
11 |
12 | async def select(ctx):
13 | message = ctx.event.message
14 | user_id = message.user_id
15 | msg = message.message.split()
16 | if len(msg) > 1:
17 | gid = msg[1]
18 | dl = await getDownloadByGid(gid)
19 | if dl is None:
20 | await sendMessage(message, f"GID: {gid} Not Found.")
21 | return
22 | elif reply_to_id := message.replied_to_id:
23 | async with download_dict_lock:
24 | dl = download_dict.get(reply_to_id, None)
25 | if dl is None:
26 | await sendMessage(message, "This is not an active task!")
27 | return
28 | elif len(msg) == 1:
29 | msg = ("Reply to an active /cmd which was used to start the qb-download or add gid along with cmd\n\n"
30 | + "This command mainly for selection incase you decided to select files from already added torrent. "
31 | + "But you can always use /cmd with arg `s` to select files before download start.")
32 | await sendMessage(message, msg)
33 | return
34 |
35 | if OWNER_ID != user_id and dl.message.user_id != user_id and \
36 | (user_id not in user_data or not user_data[user_id].get('is_sudo')):
37 | await sendMessage(message, "This task is not for you!")
38 | return
39 | if dl.status() not in [MirrorStatus.STATUS_DOWNLOADING, MirrorStatus.STATUS_PAUSED, MirrorStatus.STATUS_QUEUEDL]:
40 |         await sendMessage(message, 'Task should be in download, paused (in case the message was deleted by mistake) or queued (in case you used a torrent file) state!')
41 | return
42 | if dl.name().startswith('[METADATA]'):
43 |         await sendMessage(message, 'Try again after the metadata download has finished!')
44 | return
45 |
46 | try:
47 | listener = dl.listener()
48 | if listener.isQbit:
49 | id_ = dl.hash()
50 | client = dl.client()
51 | if not dl.queued:
52 | await sync_to_async(client.torrents_pause, torrent_hashes=id_)
53 | else:
54 | id_ = dl.gid()
55 | if not dl.queued:
56 | try:
57 | await sync_to_async(aria2.client.force_pause, id_)
58 | except Exception as e:
59 | LOGGER.error(
60 |                         f"{e} Error in pause; this mostly happens when aria2 has been overloaded")
61 | listener.select = True
62 | except:
63 | await sendMessage(message, "This is not a bittorrent task!")
64 | return
65 |
66 | SBUTTONS = bt_selection_buttons(id_)
67 | msg = "Your download paused. Choose files then press Done Selecting button to resume downloading."
68 | await sendMessage(message, msg, SBUTTONS)
69 |
70 |
71 | async def get_confirm(ctx):
72 | user_id = ctx.event.action_by_id
73 | data = ctx.event.callback_data.split()
74 | message = ctx.event.message
75 | dl = await getDownloadByGid(data[2])
76 | if dl is None:
77 | await ctx.event.answer("This task has been cancelled!", show_alert=True)
78 | await deleteMessage(message)
79 | return
80 | if hasattr(dl, 'listener'):
81 | listener = dl.listener()
82 | else:
83 |         await ctx.event.answer("Not in download state anymore! Keep this message to resume seeding if seeding is enabled!", show_alert=True)
84 | return
85 | if user_id != listener.user_id:
86 | await ctx.event.answer("This task is not for you!", show_alert=True)
87 | elif data[1] == "pin":
88 | await ctx.event.answer(data[3], show_alert=True)
89 | elif data[1] == "done":
90 | id_ = data[3]
91 | if len(id_) > 20:
92 | client = dl.client()
93 | tor_info = (await sync_to_async(client.torrents_info, torrent_hash=id_))[0]
94 | path = tor_info.content_path.rsplit('/', 1)[0]
95 | res = await sync_to_async(client.torrents_files, torrent_hash=id_)
96 | for f in res:
97 | if f.priority == 0:
98 | f_paths = [f"{path}/{f.name}", f"{path}/{f.name}.!qB"]
99 | for f_path in f_paths:
100 | if await aiopath.exists(f_path):
101 | try:
102 | await aioremove(f_path)
103 | except:
104 | pass
105 | if not dl.queued:
106 | await sync_to_async(client.torrents_resume, torrent_hashes=id_)
107 | else:
108 | res = await sync_to_async(aria2.client.get_files, id_)
109 | for f in res:
110 | if f['selected'] == 'false' and await aiopath.exists(f['path']):
111 | try:
112 | await aioremove(f['path'])
113 | except:
114 | pass
115 | if not dl.queued:
116 | try:
117 | await sync_to_async(aria2.client.unpause, id_)
118 | except Exception as e:
119 | LOGGER.error(
120 |                     f"{e} Error in resume; this mostly happens when aria2 has been overloaded. Try the select cmd again!")
121 | await sendStatusMessage(message)
122 | await deleteMessage(message)
123 |
124 |
125 | bot.add_handler(CommandHandler(BotCommands.BtSelectCommand,
126 | select, filter=CustomFilters.authorized))
127 | bot.add_handler(CallbackQueryHandler(get_confirm, filter=regexp("^btsel")))
128 |
--------------------------------------------------------------------------------
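get_confirm above dispatches on space-separated callback data of the form
"btsel <action> <gid> <extra>", and tells qBittorrent apart from aria2 by the
length of the identifier in the last field (a 40-character torrent hash versus
a 16-character aria2 gid). A small illustrative parser in the same shape, not
taken from the bot:

def parse_btsel(callback_data):
    # Keep everything after the third space intact, since the "pin" action
    # carries free text in its last field.
    tag, action, gid, extra = callback_data.split(maxsplit=3)
    assert tag == 'btsel'
    return action, gid, extra

action, gid, id_ = parse_btsel(f"btsel done 0a1b2c3d4e5f {'f' * 40}")
print(action, gid)
print('qbittorrent' if len(id_) > 20 else 'aria2')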
/bot/helper/mirror_utils/gdrive_utlis/clone.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from logging import getLogger
3 | from time import time
4 | from os import path as ospath
5 | from googleapiclient.errors import HttpError
6 | from tenacity import retry, wait_exponential, stop_after_attempt, retry_if_exception_type, RetryError
7 |
8 | from bot import config_dict, GLOBAL_EXTENSION_FILTER
9 | from bot.helper.ext_utils.bot_utils import async_to_sync
10 | from bot.helper.mirror_utils.gdrive_utlis.helper import GoogleDriveHelper
11 |
12 | LOGGER = getLogger(__name__)
13 |
14 |
15 | class gdClone(GoogleDriveHelper):
16 |
17 | def __init__(self, name, listener):
18 | super().__init__(listener, name)
19 | self.__start_time = time()
20 | self.is_cloning = True
21 |
22 | def user_setting(self, link):
23 | if self.listener.upDest.startswith('mtp:') or link.startswith('mtp:'):
24 | self.token_path = f'tokens/{self.listener.user_id}.pickle'
25 |             self.listener.upDest = self.listener.upDest.removeprefix('mtp:')
26 | self.use_sa = False
27 |
28 | def clone(self, link):
29 | self.user_setting(link)
30 | try:
31 | file_id = self.getIdFromUrl(link, self.listener.user_id)
32 | except (KeyError, IndexError):
33 | return "Google Drive ID could not be found in the provided link"
34 | self.service = self.authorize()
35 | msg = ""
36 | LOGGER.info(f"File ID: {file_id}")
37 | try:
38 | meta = self.getFileMetadata(file_id)
39 | mime_type = meta.get("mimeType")
40 | if mime_type == self.G_DRIVE_DIR_MIME_TYPE:
41 | dir_id = self.create_directory(
42 | meta.get('name'), self.listener.upDest)
43 | self.__cloneFolder(meta.get('name'), meta.get('id'), dir_id)
44 | durl = self.G_DRIVE_DIR_BASE_DOWNLOAD_URL.format(dir_id)
45 | if self.is_cancelled:
46 | LOGGER.info("Deleting cloned data from Drive...")
47 | self.service.files().delete(fileId=dir_id, supportsAllDrives=True).execute()
48 | return None, None, None, None, None, None
49 | mime_type = 'Folder'
50 | size = self.proc_bytes
51 | else:
52 | file = self.__copyFile(
53 | meta.get('id'), self.listener.upDest)
54 | msg += f'Name: {file.get("name")}'
55 | durl = self.G_DRIVE_BASE_DOWNLOAD_URL.format(file.get("id"))
56 | if mime_type is None:
57 | mime_type = 'File'
58 | size = int(meta.get('size', 0))
59 | return durl, size, mime_type, self.total_files, self.total_folders, self.getIdFromUrl(durl, self.listener.user_id)
60 | except Exception as err:
61 | if isinstance(err, RetryError):
62 | LOGGER.info(
63 | f"Total Attempts: {err.last_attempt.attempt_number}")
64 | err = err.last_attempt.exception()
65 | err = str(err).replace('>', '').replace('<', '')
66 | if "User rate limit exceeded" in err:
67 | msg = "User rate limit exceeded."
68 | elif "File not found" in err:
69 | if not self.alt_auth and self.use_sa:
70 | self.alt_auth = True
71 | self.use_sa = False
72 | LOGGER.error('File not found. Trying with token.pickle...')
73 | return self.clone(link)
74 | msg = "File not found."
75 | else:
76 | msg = f"Error.\n{err}"
77 | async_to_sync(self.listener.onUploadError, msg)
78 | return None, None, None, None, None, None
79 |
80 | def __cloneFolder(self, folder_name, folder_id, dest_id):
81 | LOGGER.info(f"Syncing: {folder_name}")
82 | files = self.getFilesByFolderId(folder_id)
83 | if len(files) == 0:
84 | return dest_id
85 | if self.listener.user_dict.get('excluded_extensions', False):
86 | extension_filter = self.listener.user_dict['excluded_extensions']
87 | elif 'excluded_extensions' not in self.listener.user_dict:
88 | extension_filter = GLOBAL_EXTENSION_FILTER
89 | else:
90 | extension_filter = ['aria2', '!qB']
91 | for file in files:
92 | if file.get('mimeType') == self.G_DRIVE_DIR_MIME_TYPE:
93 | self.total_folders += 1
94 | file_path = ospath.join(folder_name, file.get('name'))
95 | current_dir_id = self.create_directory(
96 | file.get('name'), dest_id)
97 | self.__cloneFolder(file_path, file.get('id'), current_dir_id)
98 | elif not file.get('name').lower().endswith(tuple(extension_filter)):
99 | self.total_files += 1
100 | self.__copyFile(file.get('id'), dest_id)
101 | self.proc_bytes += int(file.get('size', 0))
102 | self.total_time = int(time() - self.__start_time)
103 | if self.is_cancelled:
104 | break
105 |
106 | @retry(wait=wait_exponential(multiplier=2, min=3, max=6), stop=stop_after_attempt(3),
107 | retry=retry_if_exception_type(Exception))
108 | def __copyFile(self, file_id, dest_id):
109 | body = {'parents': [dest_id]}
110 | try:
111 | return self.service.files().copy(fileId=file_id, body=body, supportsAllDrives=True).execute()
112 | except HttpError as err:
113 | if err.resp.get('content-type', '').startswith('application/json'):
114 | reason = eval(err.content).get(
115 | 'error').get('errors')[0].get('reason')
116 | if reason not in ['userRateLimitExceeded', 'dailyLimitExceeded', 'cannotCopyFile']:
117 | raise err
118 | if reason == 'cannotCopyFile':
119 | LOGGER.error(err)
120 | elif self.use_sa:
121 | if self.sa_count >= self.sa_number:
122 | LOGGER.info(
123 |                         f"Reached the maximum number of service account switches: {self.sa_count}")
124 | raise err
125 | else:
126 | if self.is_cancelled:
127 | return
128 | self.switchServiceAccount()
129 | return self.__copyFile(file_id, dest_id)
130 | else:
131 | LOGGER.error(f"Got: {reason}")
132 | raise err
133 |
--------------------------------------------------------------------------------
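__copyFile above leans on tenacity for transient Drive failures: exponential
backoff bounded between 3 and 6 seconds and at most three attempts, after which
tenacity raises RetryError (which clone() unwraps via err.last_attempt). The
same recipe in isolation, with a stand-in for the Drive call:

from tenacity import retry, wait_exponential, stop_after_attempt, retry_if_exception_type

attempts = {'n': 0}

@retry(wait=wait_exponential(multiplier=2, min=3, max=6),
       stop=stop_after_attempt(3),
       retry=retry_if_exception_type(Exception))
def flaky_copy():
    # Stand-in for service.files().copy(...).execute(): fails twice, then works.
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise ConnectionError('transient 5xx')
    return 'copied'

print(flaky_copy(), 'after', attempts['n'], 'attempts')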
/bot/helper/mirror_utils/gdrive_utlis/download.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from logging import getLogger
3 | from os import makedirs, path as ospath
4 | from io import FileIO
5 | from googleapiclient.errors import HttpError
6 | from googleapiclient.http import MediaIoBaseDownload
7 | from tenacity import retry, wait_exponential, stop_after_attempt, retry_if_exception_type, RetryError
8 |
9 | from bot import GLOBAL_EXTENSION_FILTER
10 | from bot.helper.ext_utils.bot_utils import setInterval
11 | from bot.helper.ext_utils.bot_utils import async_to_sync
12 | from bot.helper.mirror_utils.gdrive_utlis.helper import GoogleDriveHelper
13 |
14 | LOGGER = getLogger(__name__)
15 |
16 |
17 | class gdDownload(GoogleDriveHelper):
18 |
19 | def __init__(self, name, path, listener):
20 | super().__init__(listener, name)
21 | self.__updater = None
22 | self.__path = path
23 | self.is_downloading = True
24 |
25 | def download(self, link):
26 | file_id = self.getIdFromUrl(link, self.listener.user_id)
27 | self.service = self.authorize()
28 | self.__updater = setInterval(self.update_interval, self.progress)
29 | try:
30 | meta = self.getFileMetadata(file_id)
31 | if meta.get("mimeType") == self.G_DRIVE_DIR_MIME_TYPE:
32 | self.__download_folder(file_id, self.__path, self.name)
33 | else:
34 | makedirs(self.__path, exist_ok=True)
35 | self.__download_file(file_id, self.__path,
36 | self.name, meta.get('mimeType'))
37 | except Exception as err:
38 | if isinstance(err, RetryError):
39 | LOGGER.info(
40 | f"Total Attempts: {err.last_attempt.attempt_number}")
41 | err = err.last_attempt.exception()
42 | err = str(err).replace('>', '').replace('<', '')
43 | if "downloadQuotaExceeded" in err:
44 | err = "Download Quota Exceeded."
45 | elif "File not found" in err:
46 | if not self.alt_auth and self.use_sa:
47 | self.alt_auth = True
48 | self.use_sa = False
49 | LOGGER.error('File not found. Trying with token.pickle...')
50 | self.__updater.cancel()
51 | return self.download(link)
52 | err = 'File not found!'
53 | async_to_sync(self.listener.onDownloadError, err)
54 | self.is_cancelled = True
55 | finally:
56 | self.__updater.cancel()
57 | if self.is_cancelled:
58 | return
59 | async_to_sync(self.listener.onDownloadComplete)
60 |
61 | def __download_folder(self, folder_id, path, folder_name):
62 | folder_name = folder_name.replace('/', '')
63 | if not ospath.exists(f"{path}/{folder_name}"):
64 | makedirs(f"{path}/{folder_name}")
65 | path += f"/{folder_name}"
66 | result = self.getFilesByFolderId(folder_id)
67 | if len(result) == 0:
68 | return
69 | if self.listener.user_dict.get('excluded_extensions', False):
70 | extension_filter = self.listener.user_dict['excluded_extensions']
71 | elif 'excluded_extensions' not in self.listener.user_dict:
72 | extension_filter = GLOBAL_EXTENSION_FILTER
73 | else:
74 | extension_filter = ['aria2', '!qB']
75 | result = sorted(result, key=lambda k: k['name'])
76 | for item in result:
77 | file_id = item['id']
78 | filename = item['name']
79 | shortcut_details = item.get('shortcutDetails')
80 | if shortcut_details is not None:
81 | file_id = shortcut_details['targetId']
82 | mime_type = shortcut_details['targetMimeType']
83 | else:
84 | mime_type = item.get('mimeType')
85 | if mime_type == self.G_DRIVE_DIR_MIME_TYPE:
86 | self.__download_folder(file_id, path, filename)
87 |             elif not ospath.isfile(f"{path}/{filename}") and not filename.lower().endswith(tuple(extension_filter)):
88 | self.__download_file(file_id, path, filename, mime_type)
89 | if self.is_cancelled:
90 | break
91 |
92 | @retry(wait=wait_exponential(multiplier=2, min=3, max=6), stop=stop_after_attempt(3),
93 | retry=(retry_if_exception_type(Exception)))
94 | def __download_file(self, file_id, path, filename, mime_type):
95 | request = self.service.files().get_media(
96 | fileId=file_id, supportsAllDrives=True)
97 | filename = filename.replace('/', '')
98 | if len(filename.encode()) > 255:
99 | ext = ospath.splitext(filename)[1]
100 | filename = f"{filename[:245]}{ext}"
101 | if self.name.endswith(ext):
102 | self.name = filename
103 | if self.is_cancelled:
104 | return
105 | fh = FileIO(f"{path}/{filename}", 'wb')
106 | downloader = MediaIoBaseDownload(
107 | fh, request, chunksize=100 * 1024 * 1024)
108 | done = False
109 | retries = 0
110 | while not done:
111 | if self.is_cancelled:
112 | fh.close()
113 | break
114 | try:
115 | self.status, done = downloader.next_chunk()
116 | except HttpError as err:
117 | if err.resp.status in [500, 502, 503, 504] and retries < 10:
118 | retries += 1
119 | continue
120 | if err.resp.get('content-type', '').startswith('application/json'):
121 | reason = eval(err.content).get(
122 | 'error').get('errors')[0].get('reason')
123 | if reason not in [
124 | 'downloadQuotaExceeded',
125 | 'dailyLimitExceeded',
126 | ]:
127 | raise err
128 | if self.use_sa:
129 | if self.sa_count >= self.sa_number:
130 | LOGGER.info(
131 |                             f"Reached the maximum number of service account switches: {self.sa_count}")
132 | raise err
133 | else:
134 | if self.is_cancelled:
135 | return
136 | self.switchServiceAccount()
137 | LOGGER.info(f"Got: {reason}, Trying Again...")
138 | return self.__download_file(file_id, path, filename, mime_type)
139 | else:
140 | LOGGER.error(f"Got: {reason}")
141 | raise err
142 | self.file_processed_bytes = 0
143 |
--------------------------------------------------------------------------------
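__download_file above trims any file name whose UTF-8 encoding exceeds 255
bytes (the usual filesystem limit) by keeping the first 245 characters plus the
extension; note that for multi-byte names the 245-character slice can in theory
still exceed the byte limit. The truncation step as a standalone function:

from os import path as ospath

def fit_filename(filename, limit=255):
    # Mirrors the truncation logic in __download_file.
    if len(filename.encode()) > limit:
        ext = ospath.splitext(filename)[1]
        filename = f"{filename[:245]}{ext}"
    return filename

print(len(fit_filename('a' * 300 + '.mkv')))  # 249 (245 chars + '.mkv')
print(fit_filename('short.mkv'))              # unchanged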
/bot/helper/mirror_utils/gdrive_utlis/search.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from logging import getLogger
3 | from urllib.parse import quote as rquote
4 |
5 | from bot import DRIVES_NAMES, DRIVES_IDS, INDEX_URLS, user_data
6 | from bot.helper.ext_utils.bot_utils import get_readable_file_size
7 | from bot.helper.mirror_utils.gdrive_utlis.helper import GoogleDriveHelper
8 |
9 | LOGGER = getLogger(__name__)
10 |
11 |
12 | class gdSearch(GoogleDriveHelper):
13 |
14 | def __init__(self, stopDup=False, noMulti=False, isRecursive=True, itemType=''):
15 | super().__init__()
16 | self.__stopDup = stopDup
17 | self.__noMulti = noMulti
18 | self.__isRecursive = isRecursive
19 | self.__itemType = itemType
20 |
21 | def __drive_query(self, dirId, fileName, isRecursive):
22 | try:
23 | if isRecursive:
24 | if self.__stopDup:
25 | query = f"name = '{fileName}' and "
26 | else:
27 | fileName = fileName.split()
28 | query = "".join(
29 | f"name contains '{name}' and "
30 | for name in fileName
31 | if name != ''
32 | )
33 | if self.__itemType == "files":
34 | query += f"mimeType != '{self.G_DRIVE_DIR_MIME_TYPE}' and "
35 | elif self.__itemType == "folders":
36 | query += f"mimeType = '{self.G_DRIVE_DIR_MIME_TYPE}' and "
37 | query += "trashed = false"
38 | if dirId == "root":
39 | return self.service.files().list(q=f"{query} and 'me' in owners",
40 | pageSize=200, spaces='drive',
41 | fields='files(id, name, mimeType, size, parents)',
42 | orderBy='folder, name asc').execute()
43 | else:
44 | return self.service.files().list(supportsAllDrives=True, includeItemsFromAllDrives=True,
45 | driveId=dirId, q=query, spaces='drive', pageSize=150,
46 | fields='files(id, name, mimeType, size, teamDriveId, parents)',
47 | corpora='drive', orderBy='folder, name asc').execute()
48 | else:
49 | if self.__stopDup:
50 | query = f"'{dirId}' in parents and name = '{fileName}' and "
51 | else:
52 | query = f"'{dirId}' in parents and "
53 | fileName = fileName.split()
54 | for name in fileName:
55 | if name != '':
56 | query += f"name contains '{name}' and "
57 | if self.__itemType == "files":
58 | query += f"mimeType != '{self.G_DRIVE_DIR_MIME_TYPE}' and "
59 | elif self.__itemType == "folders":
60 | query += f"mimeType = '{self.G_DRIVE_DIR_MIME_TYPE}' and "
61 | query += "trashed = false"
62 | return self.service.files().list(supportsAllDrives=True, includeItemsFromAllDrives=True,
63 | q=query, spaces='drive', pageSize=150,
64 | fields='files(id, name, mimeType, size)',
65 | orderBy='folder, name asc').execute()
66 | except Exception as err:
67 | err = str(err).replace('>', '').replace('<', '')
68 | LOGGER.error(err)
69 | return {'files': []}
70 |
71 | def drive_list(self, fileName, target_id='', user_id=''):
72 | if target_id.startswith('mtp:'):
73 | drives = self.get_user_drive(target_id, user_id)
74 | else:
75 | drives = zip(DRIVES_NAMES, DRIVES_IDS, INDEX_URLS)
76 | msg = ""
77 | fileName = self.escapes(str(fileName))
78 | contents_no = 0
79 | telegraph_content = []
80 | Title = False
81 | if not target_id.startswith('mtp:') and len(DRIVES_IDS) > 1:
82 | self.use_sa = False
83 | self.service = self.authorize()
84 | for drive_name, dir_id, index_url in drives:
85 | isRecur = False if self.__isRecursive and len(
86 | dir_id) > 23 else self.__isRecursive
87 | response = self.__drive_query(dir_id, fileName, isRecur)
88 | if not response["files"]:
89 | if self.__noMulti:
90 | break
91 | else:
92 | continue
93 | if not Title:
94 |                 msg += f'<h4>Search Result For {fileName}</h4>'
95 |                 Title = True
96 |             if drive_name:
97 |                 msg += f"╾────────────╼<br><b>{drive_name}</b><br>╾────────────╼<br>"
98 |             for file in response.get('files', []):
99 |                 mime_type = file.get('mimeType')
100 |                 if mime_type == self.G_DRIVE_DIR_MIME_TYPE:
101 |                     furl = self.G_DRIVE_DIR_BASE_DOWNLOAD_URL.format(
102 |                         file.get('id'))
103 |                     msg += f"📁 <code>{file.get('name')}<br>(folder)</code><br>"
104 |                     msg += f"<b><a href='{furl}'>Drive Link</a></b>"
105 |                     if index_url:
106 |                         url = f'{index_url}findpath?id={file.get("id")}'
107 |                         msg += f' <b>| <a href="{url}">Index Link</a></b>'
108 |                 elif mime_type == 'application/vnd.google-apps.shortcut':
109 |                     furl = self.G_DRIVE_DIR_BASE_DOWNLOAD_URL.format(
110 |                         file.get('id'))
111 |                     msg += f"⁍<code>{file.get('name')}</code>" \
112 |                         f" <b><a href='{furl}'>(shortcut)</a></b>"
113 |                 else:
114 |                     furl = self.G_DRIVE_BASE_DOWNLOAD_URL.format(
115 |                         file.get('id'))
116 |                     msg += f"📄 <code>{file.get('name')}<br>({get_readable_file_size(int(file.get('size', 0)))})</code><br>"
117 |                     msg += f"<b><a href='{furl}'>Drive Link</a></b>"
118 |                     if index_url:
119 |                         url = f'{index_url}findpath?id={file.get("id")}'
120 |                         msg += f' <b>| <a href="{url}">Index Link</a></b>'
121 |                     if mime_type.startswith(('image', 'video', 'audio')):
122 |                         urlv = f'{index_url}findpath?id={file.get("id")}&view=true'
123 |                         msg += f' <b>| <a href="{urlv}">View Link</a></b>'
124 |                 msg += '<br><br>'
125 | contents_no += 1
126 | if len(msg.encode('utf-8')) > 39000:
127 | telegraph_content.append(msg)
128 | msg = ''
129 | if self.__noMulti:
130 | break
131 |
132 | if msg != '':
133 | telegraph_content.append(msg)
134 |
135 | return telegraph_content, contents_no
136 |
137 | def get_user_drive(self, target_id, user_id):
138 |         dest_id = target_id.removeprefix('mtp:')
139 | self.token_path = f'tokens/{user_id}.pickle'
140 | self.use_sa = False
141 | user_dict = user_data.get(user_id, {})
142 | INDEX = user_dict['index_url'] if user_dict.get('index_url') else ''
143 | return [('User Choice', dest_id, INDEX)]
144 |
--------------------------------------------------------------------------------
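__drive_query above builds the Drive v3 `q` filter by chaining
"name contains '...' and" clauses ahead of a trailing "trashed = false", with
an optional mimeType clause for files-only or folders-only searches. The string
building extracted as a pure function (no API calls, mime value inlined):

def build_query(file_name, stop_dup=False, item_type='',
                dir_mime='application/vnd.google-apps.folder'):
    if stop_dup:
        query = f"name = '{file_name}' and "
    else:
        query = ''.join(f"name contains '{word}' and "
                        for word in file_name.split() if word != '')
    if item_type == 'files':
        query += f"mimeType != '{dir_mime}' and "
    elif item_type == 'folders':
        query += f"mimeType = '{dir_mime}' and "
    return query + 'trashed = false'

print(build_query('big buck bunny', item_type='files'))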
/bot/helper/ext_utils/db_handler.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from aiofiles.os import path as aiopath, makedirs
3 | from aiofiles import open as aiopen
4 | from motor.motor_asyncio import AsyncIOMotorClient
5 | from pymongo.errors import PyMongoError
6 | from dotenv import dotenv_values
7 |
8 | from bot import DATABASE_URL, user_data, rss_dict, LOGGER, bot_id, config_dict, aria2_options, qbit_options, bot_loop
9 |
10 |
11 | class DbManger:
12 | def __init__(self):
13 | self.__err = False
14 | self.__db = None
15 | self.__conn = None
16 | self.__connect()
17 |
18 | def __connect(self):
19 | try:
20 | self.__conn = AsyncIOMotorClient(DATABASE_URL)
21 | self.__db = self.__conn.mlsb
22 | except PyMongoError as e:
23 | LOGGER.error(f"Error in DB connection: {e}")
24 | self.__err = True
25 |
26 | async def db_load(self):
27 | if self.__err:
28 | return
29 | # Save bot settings
30 | await self.__db.settings.config.update_one({'_id': bot_id}, {'$set': config_dict}, upsert=True)
31 | # Save Aria2c options
32 | if await self.__db.settings.aria2c.find_one({'_id': bot_id}) is None:
33 | await self.__db.settings.aria2c.update_one({'_id': bot_id}, {'$set': aria2_options}, upsert=True)
34 | # Save qbittorrent options
35 | if await self.__db.settings.qbittorrent.find_one({'_id': bot_id}) is None:
36 | await self.__db.settings.qbittorrent.update_one({'_id': bot_id}, {'$set': qbit_options}, upsert=True)
37 | # User Data
38 | if await self.__db.users.find_one():
39 | rows = self.__db.users.find({})
40 | async for row in rows:
41 | uid = row['_id']
42 | del row['_id']
43 | thumb_path = f'Thumbnails/{uid}.jpg'
44 | rclone_config_path = f'rclone/{uid}.conf'
45 | token_path = f'tokens/{uid}.pickle'
46 | if row.get('thumb'):
47 | if not await aiopath.exists('Thumbnails'):
48 | await makedirs('Thumbnails')
49 | async with aiopen(thumb_path, 'wb+') as f:
50 | await f.write(row['thumb'])
51 | row['thumb'] = thumb_path
52 | if row.get('rclone_config'):
53 | if not await aiopath.exists('rclone'):
54 | await makedirs('rclone')
55 | async with aiopen(rclone_config_path, 'wb+') as f:
56 | await f.write(row['rclone_config'])
57 | row['rclone_config'] = rclone_config_path
58 | if row.get('token_pickle'):
59 | if not await aiopath.exists('tokens'):
60 | await makedirs('tokens')
61 | async with aiopen(token_path, 'wb+') as f:
62 | await f.write(row['token_pickle'])
63 | row['token_pickle'] = token_path
64 | user_data[uid] = row
65 | LOGGER.info("Users data has been imported from Database")
66 | # Rss Data
67 | if await self.__db.rss[bot_id].find_one():
68 |             # each row is a dict ==> {_id, title: {link, last_feed, last_name, inf, exf, command, paused}}
69 | rows = self.__db.rss[bot_id].find({})
70 | async for row in rows:
71 | user_id = row['_id']
72 | del row['_id']
73 | rss_dict[user_id] = row
74 | LOGGER.info("Rss data has been imported from Database.")
75 |         self.__conn.close()
76 |
77 | async def update_deploy_config(self):
78 | if self.__err:
79 | return
80 | current_config = dict(dotenv_values('config.env'))
81 | await self.__db.settings.deployConfig.replace_one({'_id': bot_id}, current_config, upsert=True)
82 |         self.__conn.close()
83 |
84 | async def update_config(self, dict_):
85 | if self.__err:
86 | return
87 | await self.__db.settings.config.update_one({'_id': bot_id}, {'$set': dict_}, upsert=True)
88 |         self.__conn.close()
89 |
90 | async def update_aria2(self, key, value):
91 | if self.__err:
92 | return
93 | await self.__db.settings.aria2c.update_one({'_id': bot_id}, {'$set': {key: value}}, upsert=True)
94 |         self.__conn.close()
95 |
96 | async def update_qbittorrent(self, key, value):
97 | if self.__err:
98 | return
99 | await self.__db.settings.qbittorrent.update_one({'_id': bot_id}, {'$set': {key: value}}, upsert=True)
100 |         self.__conn.close()
101 |
102 | async def update_private_file(self, path):
103 | if self.__err:
104 | return
105 | if await aiopath.exists(path):
106 | async with aiopen(path, 'rb+') as pf:
107 | pf_bin = await pf.read()
108 | else:
109 | pf_bin = ''
110 | path = path.replace('.', '__')
111 | await self.__db.settings.files.update_one({'_id': bot_id}, {'$set': {path: pf_bin}}, upsert=True)
112 |         if path == 'config__env':  # '.' was replaced with '__' above
113 | await self.update_deploy_config()
114 | else:
115 |             self.__conn.close()
116 |
117 | async def update_user_data(self, user_id):
118 | if self.__err:
119 | return
120 | data = user_data[user_id]
121 | if data.get('thumb'):
122 | del data['thumb']
123 | if data.get('rclone_config'):
124 | del data['rclone_config']
125 | if data.get('token_pickle'):
126 | del data['token_pickle']
127 | await self.__db.users.replace_one({'_id': user_id}, data, upsert=True)
128 |         self.__conn.close()
129 |
130 | async def update_user_doc(self, user_id, key, path=''):
131 | if self.__err:
132 | return
133 | if path:
134 | async with aiopen(path, 'rb+') as doc:
135 | doc_bin = await doc.read()
136 | else:
137 | doc_bin = ''
138 | await self.__db.users.update_one({'_id': user_id}, {'$set': {key: doc_bin}}, upsert=True)
139 |         self.__conn.close()
140 |
141 | async def rss_update_all(self):
142 | if self.__err:
143 | return
144 | for user_id in list(rss_dict.keys()):
145 | await self.__db.rss[bot_id].replace_one({'_id': user_id}, rss_dict[user_id], upsert=True)
146 |         self.__conn.close()
147 |
148 | async def rss_update(self, user_id):
149 | if self.__err:
150 | return
151 | await self.__db.rss[bot_id].replace_one({'_id': user_id}, rss_dict[user_id], upsert=True)
152 |         self.__conn.close()
153 |
154 | async def rss_delete(self, user_id):
155 | if self.__err:
156 | return
157 | await self.__db.rss[bot_id].delete_one({'_id': user_id})
158 |         self.__conn.close()
159 |
160 | async def add_incomplete_task(self, cid, link, tag):
161 | if self.__err:
162 | return
163 | await self.__db.tasks[bot_id].insert_one({'_id': link, 'cid': cid, 'tag': tag})
164 |         self.__conn.close()
165 |
166 | async def rm_complete_task(self, link):
167 | if self.__err:
168 | return
169 | await self.__db.tasks[bot_id].delete_one({'_id': link})
170 |         self.__conn.close()
171 |
172 | async def trunc_table(self, name):
173 | if self.__err:
174 | return
175 | await self.__db[name][bot_id].drop()
176 |         self.__conn.close()
177 |
178 |
179 | if DATABASE_URL:
180 | bot_loop.run_until_complete(DbManger().db_load())
181 |
--------------------------------------------------------------------------------
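Every write in DbManger follows one idiom: bail out if the connection failed,
then a single update_one/replace_one keyed on bot_id with upsert=True so the
document is created on first use. A minimal sketch of that idiom with motor
(assumes a reachable MongoDB; the URL, database and key names here are
illustrative):

from asyncio import run
from motor.motor_asyncio import AsyncIOMotorClient

async def save_setting(key, value, url='mongodb://localhost:27017', bot_id='bot1'):
    conn = AsyncIOMotorClient(url)
    # upsert=True inserts the settings document if this _id does not exist yet
    await conn.mlsb.settings.config.update_one(
        {'_id': bot_id}, {'$set': {key: value}}, upsert=True)
    conn.close()  # note the call: a bare `conn.close` would be a no-op

run(save_setting('STOP_DUPLICATE', True))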
/bot/helper/listeners/qbit_listener.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from asyncio import sleep
3 | from time import time
4 |
5 | from bot import download_dict, download_dict_lock, get_client, QbInterval, config_dict, QbTorrents, qb_listener_lock, LOGGER, bot_loop
6 | from bot.helper.mirror_utils.status_utils.qbit_status import QbittorrentStatus
7 | from bot.helper.switch_helper.message_utils import update_all_messages
8 | from bot.helper.ext_utils.bot_utils import get_readable_time, getDownloadByGid, new_task, sync_to_async
9 | from bot.helper.ext_utils.fs_utils import clean_unwanted
10 | from bot.helper.ext_utils.task_manager import stop_duplicate_check
11 |
12 |
13 | async def __remove_torrent(client, hash_, tag):
14 | await sync_to_async(client.torrents_delete, torrent_hashes=hash_, delete_files=True)
15 | async with qb_listener_lock:
16 | if tag in QbTorrents:
17 | del QbTorrents[tag]
18 | await sync_to_async(client.torrents_delete_tags, tags=tag)
19 |
20 |
21 | @new_task
22 | async def __onDownloadError(err, tor, button=None):
23 | LOGGER.info(f"Cancelling Download: {tor.name}")
24 | ext_hash = tor.hash
25 | download = await getDownloadByGid(ext_hash[:12])
26 | if not hasattr(download, 'client'):
27 | return
28 | listener = download.listener()
29 | client = download.client()
30 | await listener.onDownloadError(err, button)
31 | await sync_to_async(client.torrents_pause, torrent_hashes=ext_hash)
32 | await sleep(0.3)
33 | await __remove_torrent(client, ext_hash, tor.tags)
34 |
35 |
36 | @new_task
37 | async def __onSeedFinish(tor):
38 | ext_hash = tor.hash
39 | LOGGER.info(f"Cancelling Seed: {tor.name}")
40 | download = await getDownloadByGid(ext_hash[:12])
41 | if not hasattr(download, 'client'):
42 | return
43 | listener = download.listener()
44 | client = download.client()
45 | msg = f"Seeding stopped with Ratio: {round(tor.ratio, 3)} and Time: {get_readable_time(tor.seeding_time)}"
46 | await listener.onUploadError(msg)
47 | await __remove_torrent(client, ext_hash, tor.tags)
48 |
49 |
50 | @new_task
51 | async def __stop_duplicate(tor):
52 | download = await getDownloadByGid(tor.hash[:12])
53 | if not hasattr(download, 'listener'):
54 | return
55 | listener = download.listener()
56 | name = tor.content_path.rsplit('/', 1)[-1].rsplit('.!qB', 1)[0]
57 | msg, button = await stop_duplicate_check(name, listener)
58 | if msg:
59 | __onDownloadError(msg, tor, button)
60 |
61 |
62 | @new_task
63 | async def __onDownloadComplete(tor):
64 | ext_hash = tor.hash
65 | tag = tor.tags
66 | await sleep(2)
67 | download = await getDownloadByGid(ext_hash[:12])
68 | if not hasattr(download, 'client'):
69 | return
70 | listener = download.listener()
71 | client = download.client()
72 | if not listener.seed:
73 | await sync_to_async(client.torrents_pause, torrent_hashes=ext_hash)
74 | if listener.select:
75 | await clean_unwanted(listener.dir)
76 | await listener.onDownloadComplete()
77 | client = await sync_to_async(get_client)
78 | if listener.seed:
79 | async with download_dict_lock:
80 | if listener.uid in download_dict:
81 | removed = False
82 | download_dict[listener.uid] = QbittorrentStatus(listener, True)
83 | else:
84 | removed = True
85 | if removed:
86 | await __remove_torrent(client, ext_hash, tag)
87 | return
88 | async with qb_listener_lock:
89 | if tag in QbTorrents:
90 | QbTorrents[tag]['seeding'] = True
91 | else:
92 | return
93 | await update_all_messages()
94 | LOGGER.info(f"Seeding started: {tor.name} - Hash: {ext_hash}")
95 | await sync_to_async(client.auth_log_out)
96 | else:
97 | await __remove_torrent(client, ext_hash, tag)
98 |
99 |
100 | async def __qb_listener():
101 | client = await sync_to_async(get_client)
102 | while True:
103 | async with qb_listener_lock:
104 | try:
105 | if len(await sync_to_async(client.torrents_info)) == 0:
106 | QbInterval.clear()
107 | await sync_to_async(client.auth_log_out)
108 | break
109 | for tor_info in await sync_to_async(client.torrents_info):
110 | tag = tor_info.tags
111 | if tag not in QbTorrents:
112 | continue
113 | state = tor_info.state
114 | if state == "metaDL":
115 | TORRENT_TIMEOUT = config_dict['TORRENT_TIMEOUT']
116 | QbTorrents[tag]['stalled_time'] = time()
117 | if TORRENT_TIMEOUT and time() - tor_info.added_on >= TORRENT_TIMEOUT:
118 | __onDownloadError("Dead Torrent!", tor_info)
119 | else:
120 | await sync_to_async(client.torrents_reannounce, torrent_hashes=tor_info.hash)
121 | elif state == "downloading":
122 | QbTorrents[tag]['stalled_time'] = time()
123 | if config_dict['STOP_DUPLICATE'] and not QbTorrents[tag]['stop_dup_check']:
124 | QbTorrents[tag]['stop_dup_check'] = True
125 | __stop_duplicate(tor_info)
126 | elif state == "stalledDL":
127 | TORRENT_TIMEOUT = config_dict['TORRENT_TIMEOUT']
128 | if not QbTorrents[tag]['rechecked'] and 0.99989999999999999 < tor_info.progress < 1:
129 | msg = f"Force recheck - Name: {tor_info.name} Hash: "
130 | msg += f"{tor_info.hash} Downloaded Bytes: {tor_info.downloaded} "
131 | msg += f"Size: {tor_info.size} Total Size: {tor_info.total_size}"
132 | LOGGER.warning(msg)
133 | await sync_to_async(client.torrents_recheck, torrent_hashes=tor_info.hash)
134 | QbTorrents[tag]['rechecked'] = True
135 | elif TORRENT_TIMEOUT and time() - QbTorrents[tag]['stalled_time'] >= TORRENT_TIMEOUT:
136 | __onDownloadError("Dead Torrent!", tor_info)
137 | else:
138 | await sync_to_async(client.torrents_reannounce, torrent_hashes=tor_info.hash)
139 | elif state == "missingFiles":
140 | await sync_to_async(client.torrents_recheck, torrent_hashes=tor_info.hash)
141 | elif state == "error":
142 | __onDownloadError(
143 |                             "Not enough space for this torrent on device", tor_info)
144 | elif tor_info.completion_on != 0 and not QbTorrents[tag]['uploaded'] and \
145 | state not in ['checkingUP', 'checkingDL', 'checkingResumeData']:
146 | QbTorrents[tag]['uploaded'] = True
147 | __onDownloadComplete(tor_info)
148 | elif state in ['pausedUP', 'pausedDL'] and QbTorrents[tag]['seeding']:
149 | QbTorrents[tag]['seeding'] = False
150 | __onSeedFinish(tor_info)
151 | except Exception as e:
152 | LOGGER.error(str(e))
153 | client = await sync_to_async(get_client)
154 | await sleep(3)
155 |
156 |
157 | async def onDownloadStart(tag):
158 | async with qb_listener_lock:
159 |         QbTorrents[tag] = {'stalled_time': time(), 'stop_dup_check': False,
160 |                            'rechecked': False, 'uploaded': False, 'seeding': False}
161 | if not QbInterval:
162 | periodic = bot_loop.create_task(__qb_listener())
163 | QbInterval.append(periodic)
164 |
--------------------------------------------------------------------------------
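__qb_listener above is a single long-lived polling task: onDownloadStart spawns
it lazily, QbInterval doubles as the "already running" flag, and the task exits
and clears that flag once no torrents remain. The lifecycle in miniature, with
a fake work list standing in for the qBittorrent client:

from asyncio import create_task, run, sleep

work = ['t1', 't2']   # stand-in for client.torrents_info()
interval = []         # mirrors QbInterval: non-empty means poller is running

async def poller():
    while True:
        if not work:          # nothing left: clear the flag and stop
            interval.clear()
            break
        print('processing', work.pop(0))
        await sleep(0.1)      # the bot sleeps 3 seconds between sweeps

def on_download_start():
    if not interval:          # start at most one poller
        interval.append(create_task(poller()))

async def main():
    on_download_start()
    on_download_start()       # second call is a no-op
    await interval[0]

run(main())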
/bot/helper/mirror_utils/download_utils/mega_download.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from random import SystemRandom
3 | from string import ascii_letters, digits
4 | from aiofiles.os import makedirs
5 | from asyncio import Event
6 | from mega import (MegaApi, MegaListener, MegaRequest, MegaTransfer, MegaError)
7 |
8 | from bot import LOGGER, config_dict, download_dict_lock, download_dict, non_queued_dl, queue_dict_lock
9 | from bot.helper.switch_helper.message_utils import sendMessage, sendStatusMessage
10 | from bot.helper.ext_utils.bot_utils import get_mega_link_type, async_to_sync, sync_to_async
11 | from bot.helper.mirror_utils.status_utils.mega_download_status import MegaDownloadStatus
12 | from bot.helper.mirror_utils.status_utils.queue_status import QueueStatus
13 | from bot.helper.ext_utils.task_manager import is_queued, stop_duplicate_check
14 |
15 |
16 | class MegaAppListener(MegaListener):
17 | _NO_EVENT_ON = (MegaRequest.TYPE_LOGIN, MegaRequest.TYPE_FETCH_NODES)
18 | NO_ERROR = "no error"
19 |
20 | def __init__(self, continue_event: Event, listener):
21 | self.continue_event = continue_event
22 | self.node = None
23 | self.public_node = None
24 | self.listener = listener
25 | self.is_cancelled = False
26 | self.error = None
27 | self.__bytes_transferred = 0
28 | self.__speed = 0
29 | self.__name = ''
30 | super().__init__()
31 |
32 | @property
33 | def speed(self):
34 | return self.__speed
35 |
36 | @property
37 | def downloaded_bytes(self):
38 | return self.__bytes_transferred
39 |
40 | def onRequestFinish(self, api, request, error):
41 | if str(error).lower() != "no error":
42 | self.error = error.copy()
43 | LOGGER.error(f'Mega onRequestFinishError: {self.error}')
44 | self.continue_event.set()
45 | return
46 | request_type = request.getType()
47 | if request_type == MegaRequest.TYPE_LOGIN:
48 | api.fetchNodes()
49 | elif request_type == MegaRequest.TYPE_GET_PUBLIC_NODE:
50 | self.public_node = request.getPublicMegaNode()
51 | self.__name = self.public_node.getName()
52 | elif request_type == MegaRequest.TYPE_FETCH_NODES:
53 | LOGGER.info("Fetching Root Node.")
54 | self.node = api.getRootNode()
55 | self.__name = self.node.getName()
56 | LOGGER.info(f"Node Name: {self.node.getName()}")
57 |         if request_type not in self._NO_EVENT_ON or (self.node and "cloud drive" not in self.__name.lower()):
58 | self.continue_event.set()
59 |
60 | def onRequestTemporaryError(self, api, request, error: MegaError):
61 | LOGGER.error(f'Mega Request error in {error}')
62 | if not self.is_cancelled:
63 | self.is_cancelled = True
64 | async_to_sync(self.listener.onDownloadError,
65 | f"RequestTempError: {error.toString()}")
66 | self.error = error.toString()
67 | self.continue_event.set()
68 |
69 | def onTransferUpdate(self, api: MegaApi, transfer: MegaTransfer):
70 | if self.is_cancelled:
71 | api.cancelTransfer(transfer, None)
72 | self.continue_event.set()
73 | return
74 | self.__speed = transfer.getSpeed()
75 | self.__bytes_transferred = transfer.getTransferredBytes()
76 |
77 | def onTransferFinish(self, api: MegaApi, transfer: MegaTransfer, error):
78 | try:
79 | if self.is_cancelled:
80 | self.continue_event.set()
81 | elif transfer.isFinished() and (transfer.isFolderTransfer() or transfer.getFileName() == self.__name):
82 | async_to_sync(self.listener.onDownloadComplete)
83 | self.continue_event.set()
84 | except Exception as e:
85 | LOGGER.error(e)
86 |
87 | def onTransferTemporaryError(self, api, transfer, error):
88 | filen = transfer.getFileName()
89 | state = transfer.getState()
90 | errStr = error.toString()
91 | LOGGER.error(
92 | f'Mega download error in file {transfer} {filen}: {error}')
93 | if state in [1, 4]:
94 |             # Sometimes MEGA (official client) can't stream a node either and raises a temp failed error.
95 | # Don't break the transfer queue if transfer's in queued (1) or retrying (4) state [causes seg fault]
96 | return
97 |
98 | self.error = errStr
99 | if not self.is_cancelled:
100 | self.is_cancelled = True
101 | async_to_sync(self.listener.onDownloadError,
102 | f"TransferTempError: {errStr} ({filen})")
103 | self.continue_event.set()
104 |
105 | async def cancel_download(self):
106 | self.is_cancelled = True
107 | await self.listener.onDownloadError("Download Canceled by user")
108 |
109 |
110 | class AsyncExecutor:
111 |
112 | def __init__(self):
113 | self.continue_event = Event()
114 |
115 | async def do(self, function, args):
116 | self.continue_event.clear()
117 | await sync_to_async(function, *args)
118 | await self.continue_event.wait()
119 |
120 |
121 | async def add_mega_download(mega_link, path, listener, name):
122 | MEGA_EMAIL = config_dict['MEGA_EMAIL']
123 | MEGA_PASSWORD = config_dict['MEGA_PASSWORD']
124 |
125 | executor = AsyncExecutor()
126 | api = MegaApi(None, None, None, 'mirror-leech-switch-bot')
127 | folder_api = None
128 |
129 | mega_listener = MegaAppListener(executor.continue_event, listener)
130 | api.addListener(mega_listener)
131 |
132 | if MEGA_EMAIL and MEGA_PASSWORD:
133 | await executor.do(api.login, (MEGA_EMAIL, MEGA_PASSWORD))
134 |
135 | if get_mega_link_type(mega_link) == "file":
136 | await executor.do(api.getPublicNode, (mega_link,))
137 | node = mega_listener.public_node
138 | else:
139 | folder_api = MegaApi(None, None, None, 'mirror-leech-switch-bot')
140 | folder_api.addListener(mega_listener)
141 | await executor.do(folder_api.loginToFolder, (mega_link,))
142 | node = await sync_to_async(folder_api.authorizeNode, mega_listener.node)
143 | if mega_listener.error is not None:
144 | await sendMessage(listener.message, str(mega_listener.error))
145 | await executor.do(api.logout, ())
146 | if folder_api is not None:
147 | await executor.do(folder_api.logout, ())
148 | return
149 |
150 | name = name or node.getName()
151 | msg, button = await stop_duplicate_check(name, listener)
152 | if msg:
153 | await sendMessage(listener.message, msg, button)
154 | await executor.do(api.logout, ())
155 | if folder_api is not None:
156 | await executor.do(folder_api.logout, ())
157 | return
158 |
159 | gid = ''.join(SystemRandom().choices(ascii_letters + digits, k=8))
160 | size = api.getSize(node)
161 |
162 | added_to_queue, event = await is_queued(listener.uid)
163 | if added_to_queue:
164 | LOGGER.info(f"Added to Queue/Download: {name}")
165 | async with download_dict_lock:
166 | download_dict[listener.uid] = QueueStatus(
167 | name, size, gid, listener, 'Dl')
168 | await sendStatusMessage(listener.message)
169 | await event.wait()
170 | async with download_dict_lock:
171 | if listener.uid not in download_dict:
172 | await executor.do(api.logout, ())
173 | if folder_api is not None:
174 | await executor.do(folder_api.logout, ())
175 | return
176 | from_queue = True
177 | LOGGER.info(f'Start Queued Download from Mega: {name}')
178 | else:
179 | from_queue = False
180 |
181 | async with download_dict_lock:
182 | download_dict[listener.uid] = MegaDownloadStatus(
183 | name, size, gid, mega_listener, listener.message)
184 | async with queue_dict_lock:
185 | non_queued_dl.add(listener.uid)
186 |
187 | if from_queue:
188 | LOGGER.info(f'Start Queued Download from Mega: {name}')
189 | else:
190 | await sendStatusMessage(listener.message)
191 | LOGGER.info(f"Download from Mega: {name}")
192 |
193 | await makedirs(path, exist_ok=True)
194 | await executor.do(api.startDownload, (node, path, name, None, False, None))
195 | await executor.do(api.logout, ())
196 | if folder_api is not None:
197 | await executor.do(folder_api.logout, ())
198 |
--------------------------------------------------------------------------------
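AsyncExecutor above is the bridge between MEGA's callback-driven SDK and
asyncio: clear an Event, run the blocking SDK call off the event loop, then
wait for a listener callback to set the Event. The same handshake with a fake
SDK in place of MEGA (everything here is illustrative):

from asyncio import Event, get_running_loop, run, to_thread

class AsyncExecutor:
    def __init__(self):
        self.continue_event = Event()

    async def do(self, function, args):
        self.continue_event.clear()
        await to_thread(function, *args)  # the bot uses sync_to_async here
        await self.continue_event.wait()  # released by the listener callback

executor = AsyncExecutor()

def fake_login(user, loop):
    print(f'logging in {user}...')
    # A real SDK fires onRequestFinish from its own thread; setting an
    # asyncio.Event from another thread must go through the loop.
    loop.call_soon_threadsafe(executor.continue_event.set)

async def main():
    await executor.do(fake_login, ('me@example.com', get_running_loop()))
    print('login acknowledged')

run(main())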
/bot/helper/listeners/aria2_listener.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from asyncio import sleep
3 | from time import time
4 | from aiofiles.os import remove as aioremove, path as aiopath
5 |
6 | from bot import aria2, download_dict_lock, download_dict, LOGGER, config_dict
7 | from bot.helper.mirror_utils.gdrive_utlis.search import gdSearch
8 | from bot.helper.mirror_utils.status_utils.aria2_status import Aria2Status
9 | from bot.helper.ext_utils.fs_utils import get_base_name, clean_unwanted
10 | from bot.helper.ext_utils.bot_utils import getDownloadByGid, new_thread, bt_selection_buttons, sync_to_async, get_telegraph_list, is_gdrive_id
11 | from bot.helper.switch_helper.message_utils import sendMessage, deleteMessage, update_all_messages
12 |
13 |
14 | @new_thread
15 | async def __onDownloadStarted(api, gid):
16 | download = await sync_to_async(api.get_download, gid)
17 | if download.is_metadata:
18 | LOGGER.info(f'onDownloadStarted: {gid} METADATA')
19 | await sleep(1)
20 | if dl := await getDownloadByGid(gid):
21 | listener = dl.listener()
22 | if listener.select:
23 |                 metamsg = "Downloading metadata. Wait, then you can select files. Use a torrent file to avoid this wait."
24 | meta = await sendMessage(listener.message, metamsg)
25 | while True:
26 | await sleep(0.5)
27 | if download.is_removed or download.followed_by_ids:
28 | await deleteMessage(meta)
29 | break
30 | download = download.live
31 | return
32 | else:
33 | LOGGER.info(f'onDownloadStarted: {download.name} - Gid: {gid}')
34 | await sleep(1)
35 |
36 | if dl := await getDownloadByGid(gid):
37 | if not hasattr(dl, 'listener'):
38 | LOGGER.warning(
39 |                 f"onDownloadStart: {gid}. STOP_DUPLICATE check skipped since the download completed earlier!")
40 | return
41 | listener = dl.listener()
42 |         if (listener.upDest.startswith('mtp:') and listener.user_dict.get('stop_duplicate', False)) or (not listener.upDest.startswith('mtp:') and config_dict['STOP_DUPLICATE']):
43 | if listener.isLeech or listener.select or not is_gdrive_id(listener.upDest):
44 | return
45 | download = await sync_to_async(api.get_download, gid)
46 | if not download.is_torrent:
47 | await sleep(2)
48 | download = download.live
49 | LOGGER.info('Checking File/Folder if already in Drive...')
50 | name = download.name
51 | if listener.compress:
52 | name = f"{name}.zip"
53 | elif listener.extract:
54 | try:
55 | name = get_base_name(name)
56 | except:
57 | name = None
58 | if name is not None:
59 | telegraph_content, contents_no = await sync_to_async(gdSearch(stopDup=True).drive_list, name, listener.upDest, listener.user_id)
60 | if telegraph_content:
61 | msg = f"File/Folder is already available in Drive.\nHere are {contents_no} list results:"
62 | button = await get_telegraph_list(telegraph_content)
63 | await listener.onDownloadError(msg, button)
64 | await sync_to_async(api.remove, [download], force=True, files=True)
65 |
66 |
67 | @new_thread
68 | async def __onDownloadComplete(api, gid):
69 | try:
70 | download = await sync_to_async(api.get_download, gid)
71 | except:
72 | return
73 | if download.followed_by_ids:
74 | new_gid = download.followed_by_ids[0]
75 | LOGGER.info(f'Gid changed from {gid} to {new_gid}')
76 | if dl := await getDownloadByGid(new_gid):
77 | listener = dl.listener()
78 | if config_dict['BASE_URL'] and listener.select:
79 | if not dl.queued:
80 | await sync_to_async(api.client.force_pause, new_gid)
81 | SBUTTONS = bt_selection_buttons(new_gid)
82 |                 msg = "Your download is paused. Choose files, then press the Done Selecting button to start downloading."
83 | await sendMessage(listener.message, msg, SBUTTONS)
84 | elif download.is_torrent:
85 | if dl := await getDownloadByGid(gid):
86 | if hasattr(dl, 'listener') and dl.seeding:
87 | LOGGER.info(
88 | f"Cancelling Seed: {download.name} onDownloadComplete")
89 | listener = dl.listener()
90 | await listener.onUploadError(f"Seeding stopped with Ratio: {dl.ratio()} and Time: {dl.seeding_time()}")
91 | await sync_to_async(api.remove, [download], force=True, files=True)
92 | else:
93 | LOGGER.info(f"onDownloadComplete: {download.name} - Gid: {gid}")
94 | if dl := await getDownloadByGid(gid):
95 | listener = dl.listener()
96 | await listener.onDownloadComplete()
97 | await sync_to_async(api.remove, [download], force=True, files=True)
98 |
99 |
100 | @new_thread
101 | async def __onBtDownloadComplete(api, gid):
102 | seed_start_time = time()
103 | await sleep(1)
104 | download = await sync_to_async(api.get_download, gid)
105 | LOGGER.info(f"onBtDownloadComplete: {download.name} - Gid: {gid}")
106 | if dl := await getDownloadByGid(gid):
107 | listener = dl.listener()
108 | if listener.select:
109 | res = download.files
110 | for file_o in res:
111 | f_path = file_o.path
112 | if not file_o.selected and await aiopath.exists(f_path):
113 | try:
114 | await aioremove(f_path)
115 | except:
116 | pass
117 | await clean_unwanted(download.dir)
118 | if listener.seed:
119 | try:
120 | await sync_to_async(api.set_options, {'max-upload-limit': '0'}, [download])
121 | except Exception as e:
122 |                 LOGGER.error(
123 |                     f'{e} Unable to seed because the global option seed-time=0 was set without a specific seed_time for this torrent. GID: {gid}')
124 | else:
125 | try:
126 | await sync_to_async(api.client.force_pause, gid)
127 | except Exception as e:
128 | LOGGER.error(f"{e} GID: {gid}")
129 | await listener.onDownloadComplete()
130 | download = download.live
131 | if listener.seed:
132 | if download.is_complete:
133 | if dl := await getDownloadByGid(gid):
134 | LOGGER.info(f"Cancelling Seed: {download.name}")
135 | await listener.onUploadError(f"Seeding stopped with Ratio: {dl.ratio()} and Time: {dl.seeding_time()}")
136 | await sync_to_async(api.remove, [download], force=True, files=True)
137 | else:
138 | async with download_dict_lock:
139 | if listener.uid not in download_dict:
140 | await sync_to_async(api.remove, [download], force=True, files=True)
141 | return
142 | download_dict[listener.uid] = Aria2Status(
143 | gid, listener, True)
144 | download_dict[listener.uid].start_time = seed_start_time
145 | LOGGER.info(f"Seeding started: {download.name} - Gid: {gid}")
146 | await update_all_messages()
147 | else:
148 | await sync_to_async(api.remove, [download], force=True, files=True)
149 |
150 |
151 | @new_thread
152 | async def __onDownloadStopped(api, gid):
153 | await sleep(6)
154 | if dl := await getDownloadByGid(gid):
155 | listener = dl.listener()
156 | await listener.onDownloadError('Dead torrent!')
157 |
158 |
159 | @new_thread
160 | async def __onDownloadError(api, gid):
161 | LOGGER.info(f"onDownloadError: {gid}")
162 | error = "None"
163 | try:
164 | download = await sync_to_async(api.get_download, gid)
165 | error = download.error_message
166 | LOGGER.info(f"Download Error: {error}")
167 | except:
168 | pass
169 | if dl := await getDownloadByGid(gid):
170 | listener = dl.listener()
171 | await listener.onDownloadError(error)
172 |
173 |
174 | def start_aria2_listener():
175 | aria2.listen_to_notifications(threaded=False,
176 | on_download_start=__onDownloadStarted,
177 | on_download_error=__onDownloadError,
178 | on_download_stop=__onDownloadStopped,
179 | on_download_complete=__onDownloadComplete,
180 | on_bt_download_complete=__onBtDownloadComplete,
181 | timeout=60)
182 |
--------------------------------------------------------------------------------
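
start_aria2_listener blocks its thread inside listen_to_notifications, which is why it is started through the bot's own threading helper. A standalone sketch of the same aria2p notification flow, assuming a local aria2c daemon started with --enable-rpc:

    import aria2p

    aria2 = aria2p.API(aria2p.Client(host='http://localhost', port=6800, secret=''))

    def on_complete(api, gid):
        # handlers receive the API object and the download's GID, as above
        print(f'finished: {api.get_download(gid).name}')

    # threaded=False blocks the calling thread until the listener stops
    aria2.listen_to_notifications(threaded=False,
                                  on_download_complete=on_complete,
                                  timeout=60)
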
/bot/helper/ext_utils/leech_utils.py:
--------------------------------------------------------------------------------
1 | from os import path as ospath
2 | from aiofiles.os import remove as aioremove, path as aiopath, mkdir
3 | from time import time
4 | from re import search as re_search
5 | from asyncio import create_subprocess_exec
6 | from asyncio.subprocess import PIPE
7 | from json import loads as jsonloads
8 | from bot import LOGGER, MAX_SPLIT_SIZE, config_dict
9 | from bot.helper.ext_utils.bot_utils import cmd_exec
10 | from bot.helper.ext_utils.bot_utils import sync_to_async
11 | from bot.helper.ext_utils.fs_utils import ARCH_EXT, get_mime_type
12 |
13 |
14 | async def is_multi_streams(path):
15 | try:
16 | result = await cmd_exec(["ffprobe", "-hide_banner", "-loglevel", "error", "-print_format",
17 | "json", "-show_streams", path])
18 | if res := result[1]:
19 | LOGGER.warning(f'Get Video Streams: {res}')
20 | except Exception as e:
21 |         LOGGER.error(f'Get Video Streams: {e}. Most likely the file was not found!')
22 | return False
23 |     fields = jsonloads(result[0]).get('streams')  # parse ffprobe JSON safely instead of eval
24 | if fields is None:
25 | LOGGER.error(f"get_video_streams: {result}")
26 | return False
27 | videos = 0
28 | audios = 0
29 | for stream in fields:
30 | if stream.get('codec_type') == 'video':
31 | videos += 1
32 | elif stream.get('codec_type') == 'audio':
33 | audios += 1
34 | return videos > 1 or audios > 1
35 |
36 |
37 | async def get_media_info(path):
38 | try:
39 | result = await cmd_exec(["ffprobe", "-hide_banner", "-loglevel", "error", "-print_format",
40 | "json", "-show_format", path])
41 | if res := result[1]:
42 | LOGGER.warning(f'Get Media Info: {res}')
43 | except Exception as e:
44 |         LOGGER.error(f'Get Media Info: {e}. Most likely the file was not found!')
45 | return 0, None, None
46 |     fields = jsonloads(result[0]).get('format')
47 | if fields is None:
48 | LOGGER.error(f"get_media_info: {result}")
49 | return 0, None, None
50 | duration = round(float(fields.get('duration', 0)))
51 | tags = fields.get('tags', {})
52 | artist = tags.get('artist') or tags.get('ARTIST') or tags.get('Artist')
53 | title = tags.get('title') or tags.get('TITLE') or tags.get('Title')
54 | return duration, artist, title
55 |
56 |
57 | async def get_document_type(path):
58 | is_video, is_audio, is_image = False, False, False
59 | if path.endswith(tuple(ARCH_EXT)) or re_search(r'.+(\.|_)(rar|7z|zip|bin)(\.0*\d+)?$', path):
60 | return is_video, is_audio, is_image
61 | mime_type = await sync_to_async(get_mime_type, path)
62 | if mime_type.startswith('audio'):
63 | return False, True, False
64 | if mime_type.startswith('image'):
65 | return False, False, True
66 | if not mime_type.startswith('video') and not mime_type.endswith('octet-stream'):
67 | return is_video, is_audio, is_image
68 | try:
69 | result = await cmd_exec(["ffprobe", "-hide_banner", "-loglevel", "error", "-print_format",
70 | "json", "-show_streams", path])
71 | if res := result[1]:
72 | LOGGER.warning(f'Get Document Type: {res}')
73 | except Exception as e:
74 |         LOGGER.error(f'Get Document Type: {e}. Most likely the file was not found!')
75 | return is_video, is_audio, is_image
76 |     fields = jsonloads(result[0]).get('streams')
77 | if fields is None:
78 | LOGGER.error(f"get_document_type: {result}")
79 | return is_video, is_audio, is_image
80 | for stream in fields:
81 | if stream.get('codec_type') == 'video':
82 | is_video = True
83 | elif stream.get('codec_type') == 'audio':
84 | is_audio = True
85 | return is_video, is_audio, is_image
86 |
87 |
88 | async def take_ss(video_file, duration):
89 | des_dir = 'Thumbnails'
90 | if not await aiopath.exists(des_dir):
91 | await mkdir(des_dir)
92 | des_dir = ospath.join(des_dir, f"{time()}.jpg")
93 | if duration is None:
94 | duration = (await get_media_info(video_file))[0]
95 | if duration == 0:
96 | duration = 3
97 | duration = duration // 2
98 | cmd = ["ffmpeg", "-hide_banner", "-loglevel", "error", "-ss", str(duration),
99 | "-i", video_file, "-vf", "thumbnail", "-frames:v", "1", des_dir]
100 | status = await create_subprocess_exec(*cmd, stderr=PIPE)
101 | if await status.wait() != 0 or not await aiopath.exists(des_dir):
102 | err = (await status.stderr.read()).decode().strip()
103 | LOGGER.error(
104 | f'Error while extracting thumbnail. Name: {video_file} stderr: {err}')
105 | return None
106 | return des_dir
107 |
108 |
109 | async def split_file(path, size, file_, dirpath, split_size, listener, start_time=0, i=1, inLoop=False, multi_streams=True):
110 |     if listener.suproc == 'cancelled' or (listener.suproc is not None and listener.suproc.returncode == -9):
111 | return False
112 | if listener.seed and not listener.newDir:
113 | dirpath = f"{dirpath}/splited_files_mltb"
114 | if not await aiopath.exists(dirpath):
115 | await mkdir(dirpath)
116 | leech_split_size = listener.user_dict.get(
117 | 'split_size') or config_dict['LEECH_SPLIT_SIZE']
118 | leech_split_size = min(leech_split_size, MAX_SPLIT_SIZE)
119 | parts = -(-size // leech_split_size)
120 | if (listener.user_dict.get('equal_splits') or config_dict['EQUAL_SPLITS'] and 'equal_splits' not in listener.user_dict) and not inLoop:
121 | split_size = ((size + parts - 1) // parts) + 1000
122 | if (await get_document_type(path))[0]:
123 | if multi_streams:
124 | multi_streams = await is_multi_streams(path)
125 | duration = (await get_media_info(path))[0]
126 | base_name, extension = ospath.splitext(file_)
127 | split_size -= 5000000
128 | while i <= parts or start_time < duration - 4:
129 | parted_name = f"{base_name}.part{i:03}{extension}"
130 | out_path = ospath.join(dirpath, parted_name)
131 | cmd = ["ffmpeg", "-hide_banner", "-loglevel", "error", "-ss", str(start_time), "-i", path,
132 | "-fs", str(split_size), "-map", "0", "-map_chapters", "-1", "-async", "1", "-strict",
133 | "-2", "-c", "copy", out_path]
134 |             if not multi_streams:
135 |                 # drop "-map", "0" so ffmpeg keeps only the default streams
136 |                 del cmd[10:12]
137 |             if listener.suproc == 'cancelled' or (listener.suproc is not None and listener.suproc.returncode == -9):
138 | return False
139 | listener.suproc = await create_subprocess_exec(*cmd, stderr=PIPE)
140 | code = await listener.suproc.wait()
141 | if code == -9:
142 | return False
143 | elif code != 0:
144 | err = (await listener.suproc.stderr.read()).decode().strip()
145 | try:
146 | await aioremove(out_path)
147 | except:
148 | pass
149 | if multi_streams:
150 |                 LOGGER.warning(
151 |                     f"{err}. Retrying without -map 0, since it does not work in every situation. Path: {path}")
152 | return await split_file(path, size, file_, dirpath, split_size, listener, start_time, i, True, False)
153 | else:
154 |                 LOGGER.warning(
155 |                     f"{err}. Unable to split this video; if its size is less than {MAX_SPLIT_SIZE} it will be uploaded as is. Path: {path}")
156 | return "errored"
157 | out_size = await aiopath.getsize(out_path)
158 | if out_size > MAX_SPLIT_SIZE:
159 | dif = out_size - MAX_SPLIT_SIZE
160 | split_size -= dif + 5000000
161 | await aioremove(out_path)
162 |                 return await split_file(path, size, file_, dirpath, split_size, listener, start_time, i, True)
163 | lpd = (await get_media_info(out_path))[0]
164 | if lpd == 0:
165 |                 LOGGER.error(
166 |                     f'Something went wrong while splitting; most likely the file is corrupted. Path: {path}')
167 | break
168 | elif duration == lpd:
169 |                 LOGGER.warning(
170 |                     f"This file was split using only the default video and audio streams, so you will see a single part smaller than the original because it does not include every stream. This happens mostly with MKV videos. Path: {path}")
171 | break
172 | elif lpd <= 3:
173 | await aioremove(out_path)
174 | break
175 | start_time += lpd - 3
176 | i += 1
177 | else:
178 | out_path = ospath.join(dirpath, f"{file_}.")
179 | listener.suproc = await create_subprocess_exec("split", "--numeric-suffixes=1", "--suffix-length=3",
180 | f"--bytes={split_size}", path, out_path, stderr=PIPE)
181 | code = await listener.suproc.wait()
182 | if code == -9:
183 | return False
184 | elif code != 0:
185 | err = (await listener.suproc.stderr.read()).decode().strip()
186 | LOGGER.error(err)
187 | return True
188 |
--------------------------------------------------------------------------------
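
The part count in split_file is a ceiling division, and the equal-splits branch then rebalances the part size so every piece comes out roughly the same. A worked example with illustrative numbers:

    size = 10_000_000_000               # 10 GB source file
    leech_split_size = 2_097_152_000    # ~2 GB per part
    parts = -(-size // leech_split_size)               # ceiling division -> 5
    split_size = ((size + parts - 1) // parts) + 1000  # -> 2_000_001_000 bytes
    # the extra 1000 bytes of headroom keep integer rounding from
    # spilling a tiny additional part
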
/bot/helper/mirror_utils/gdrive_utlis/helper.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from logging import getLogger, ERROR
3 | from pickle import load as pload
4 | from os import path as ospath, listdir
5 | from re import search as re_search
6 | from urllib.parse import parse_qs, urlparse
7 | from random import randrange
8 | from google.oauth2 import service_account
9 | from googleapiclient.discovery import build
10 | from tenacity import retry, wait_exponential, stop_after_attempt, retry_if_exception_type
11 |
12 | from bot import config_dict
13 | from bot.helper.ext_utils.bot_utils import is_gdrive_id
14 |
15 | LOGGER = getLogger(__name__)
16 | getLogger('googleapiclient.discovery').setLevel(ERROR)
17 |
18 |
19 | class GoogleDriveHelper:
20 |
21 | def __init__(self, listener=None, name=None):
22 | self.__OAUTH_SCOPE = ['https://www.googleapis.com/auth/drive']
23 | self.token_path = "token.pickle"
24 | self.G_DRIVE_DIR_MIME_TYPE = "application/vnd.google-apps.folder"
25 | self.G_DRIVE_BASE_DOWNLOAD_URL = "https://drive.google.com/uc?id={}&export=download"
26 | self.G_DRIVE_DIR_BASE_DOWNLOAD_URL = "https://drive.google.com/drive/folders/{}"
27 | self.is_uploading = False
28 | self.is_downloading = False
29 | self.is_cloning = False
30 | self.is_cancelled = False
31 | self.sa_index = 0
32 | self.sa_count = 1
33 | self.sa_number = 100
34 | self.alt_auth = False
35 | self.listener = listener
36 | self.service = None
37 | self.name = name
38 | self.total_files = 0
39 | self.total_folders = 0
40 | self.file_processed_bytes = 0
41 | self.proc_bytes = 0
42 | self.total_time = 0
43 | self.status = None
44 | self.update_interval = 3
45 | self.use_sa = config_dict['USE_SERVICE_ACCOUNTS']
46 |
47 | @property
48 | def speed(self):
49 | try:
50 | return self.proc_bytes / self.total_time
51 | except:
52 | return 0
53 |
54 | @property
55 | def processed_bytes(self):
56 | return self.proc_bytes
57 |
58 | async def progress(self):
59 | if self.status is not None:
60 | chunk_size = self.status.total_size * \
61 | self.status.progress() - self.file_processed_bytes
62 | self.file_processed_bytes = self.status.total_size * self.status.progress()
63 | self.proc_bytes += chunk_size
64 | self.total_time += self.update_interval
65 |
66 | def authorize(self):
67 | credentials = None
68 | if self.use_sa:
69 | json_files = listdir("accounts")
70 | self.sa_number = len(json_files)
71 | self.sa_index = randrange(self.sa_number)
72 | LOGGER.info(
73 | f"Authorizing with {json_files[self.sa_index]} service account")
74 | credentials = service_account.Credentials.from_service_account_file(
75 | f'accounts/{json_files[self.sa_index]}',
76 | scopes=self.__OAUTH_SCOPE)
77 | elif ospath.exists(self.token_path):
78 |             LOGGER.info(f"Authorizing with {self.token_path}")
79 | with open(self.token_path, 'rb') as f:
80 | credentials = pload(f)
81 | else:
82 | LOGGER.error('token.pickle not found!')
83 | return build('drive', 'v3', credentials=credentials, cache_discovery=False)
84 |
85 | def switchServiceAccount(self):
86 | if self.sa_index == self.sa_number - 1:
87 | self.sa_index = 0
88 | else:
89 | self.sa_index += 1
90 | self.sa_count += 1
91 |         LOGGER.info(f"Switching to service account index {self.sa_index}")
92 | self.service = self.authorize()
93 |
94 | def getIdFromUrl(self, link, user_id):
95 | if link.startswith('mtp:'):
96 | self.use_sa = False
97 |             link = link.replace('mtp:', '', 1)  # lstrip('mtp:') would strip characters, not the prefix
98 | self.token_path = f'tokens/{user_id}.pickle'
99 | if is_gdrive_id(link):
100 | return link
101 | if "folders" in link or "file" in link:
102 | regex = r"https:\/\/drive\.google\.com\/(?:drive(.*?)\/folders\/|file(.*?)?\/d\/)([-\w]+)"
103 | res = re_search(regex, link)
104 | if res is None:
105 | raise IndexError("G-Drive ID not found.")
106 | return res.group(3)
107 | parsed = urlparse(link)
108 | return parse_qs(parsed.query)['id'][0]
109 |
110 | @retry(wait=wait_exponential(multiplier=2, min=3, max=6), stop=stop_after_attempt(3),
111 | retry=retry_if_exception_type(Exception))
112 | def set_permission(self, file_id):
113 | permissions = {
114 | 'role': 'reader',
115 | 'type': 'anyone',
116 | 'value': None,
117 | 'withLink': True
118 | }
119 | return self.service.permissions().create(fileId=file_id, body=permissions, supportsAllDrives=True).execute()
120 |
121 | @retry(wait=wait_exponential(multiplier=2, min=3, max=6), stop=stop_after_attempt(3),
122 | retry=retry_if_exception_type(Exception))
123 | def getFileMetadata(self, file_id):
124 | return self.service.files().get(fileId=file_id, supportsAllDrives=True,
125 | fields='name, id, mimeType, size').execute()
126 |
127 | @retry(wait=wait_exponential(multiplier=2, min=3, max=6), stop=stop_after_attempt(3),
128 | retry=retry_if_exception_type(Exception))
129 | def getFilesByFolderId(self, folder_id, item_type=""):
130 | page_token = None
131 | files = []
132 | if not item_type:
133 | q = f"'{folder_id}' in parents and trashed = false"
134 | elif item_type == "folders":
135 | q = f"'{folder_id}' in parents and mimeType = '{self.G_DRIVE_DIR_MIME_TYPE}' and trashed = false"
136 | else:
137 | q = f"'{folder_id}' in parents and mimeType != '{self.G_DRIVE_DIR_MIME_TYPE}' and trashed = false"
138 | while True:
139 | response = self.service.files().list(supportsAllDrives=True, includeItemsFromAllDrives=True,
140 | q=q,
141 | spaces='drive', pageSize=200,
142 | fields='nextPageToken, files(id, name, mimeType, size, shortcutDetails)',
143 | orderBy='folder, name', pageToken=page_token).execute()
144 | files.extend(response.get('files', []))
145 | page_token = response.get('nextPageToken')
146 | if page_token is None:
147 | break
148 | return files
149 |
150 | @retry(wait=wait_exponential(multiplier=2, min=3, max=6), stop=stop_after_attempt(3),
151 | retry=retry_if_exception_type(Exception))
152 | def create_directory(self, directory_name, dest_id):
153 | file_metadata = {
154 | "name": directory_name,
155 | "description": "Uploaded by Mirror-leech-switch-bot",
156 | "mimeType": self.G_DRIVE_DIR_MIME_TYPE
157 | }
158 | if dest_id is not None:
159 | file_metadata["parents"] = [dest_id]
160 | file = self.service.files().create(
161 | body=file_metadata, supportsAllDrives=True).execute()
162 | file_id = file.get("id")
163 | if not config_dict['IS_TEAM_DRIVE']:
164 | self.set_permission(file_id)
165 | LOGGER.info(
166 | f'Created G-Drive Folder:\nName: {file.get("name")}\nID: {file_id}')
167 | return file_id
168 |
169 | def escapes(self, estr):
170 | chars = ['\\', "'", '"', r'\a', r'\b', r'\f', r'\n', r'\r', r'\t']
171 | for char in chars:
172 | estr = estr.replace(char, f'\\{char}')
173 | return estr.strip()
174 |
175 | '''
176 | def get_recursive_list(self, file, rootId):
177 | rtnlist = []
178 | if not rootId:
179 | rootId = file.get('teamDriveId')
180 | if rootId == "root":
181 | rootId = self.service.files().get(
182 | fileId='root', fields='id').execute().get('id')
183 | x = file.get("name")
184 | y = file.get("id")
185 | while (y != rootId):
186 | rtnlist.append(x)
187 | file = self.service.files().get(fileId=file.get("parents")[0], supportsAllDrives=True,
188 | fields='id, name, parents').execute()
189 | x = file.get("name")
190 | y = file.get("id")
191 | rtnlist.reverse()
192 | return rtnlist
193 | '''
194 |
195 | async def cancel_download(self):
196 | self.is_cancelled = True
197 | if self.is_downloading:
198 | LOGGER.info(f"Cancelling Download: {self.name}")
199 | await self.listener.onDownloadError('Download stopped by user!')
200 | elif self.is_cloning:
201 | LOGGER.info(f"Cancelling Clone: {self.name}")
202 |             await self.listener.onUploadError('Your clone has been stopped and the cloned data has been deleted!')
203 | elif self.is_uploading:
204 | LOGGER.info(f"Cancelling Upload: {self.name}")
205 |             await self.listener.onUploadError('Your upload has been stopped and the uploaded data has been deleted!')
206 |
--------------------------------------------------------------------------------
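
getIdFromUrl above accepts raw Drive IDs, mtp:-prefixed user links, folder and file URLs, and open?id= links. A usage sketch with hypothetical IDs:

    helper = GoogleDriveHelper()
    helper.getIdFromUrl('https://drive.google.com/drive/folders/xyz789', 1)  # -> 'xyz789'
    helper.getIdFromUrl('https://drive.google.com/file/d/abc123/view', 1)    # -> 'abc123'
    helper.getIdFromUrl('https://drive.google.com/open?id=abc123', 1)        # -> 'abc123'
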
/bot/helper/mirror_utils/gdrive_utlis/upload.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | from logging import getLogger
3 | from os import path as ospath, listdir, remove as osremove
4 | from googleapiclient.errors import HttpError
5 | from googleapiclient.http import MediaFileUpload
6 | from tenacity import retry, wait_exponential, stop_after_attempt, retry_if_exception_type, RetryError
7 | from json import loads as jsonloads
8 | from bot import config_dict, GLOBAL_EXTENSION_FILTER
9 | from bot.helper.ext_utils.fs_utils import get_mime_type
10 | from bot.helper.ext_utils.bot_utils import async_to_sync, setInterval
11 | from bot.helper.mirror_utils.gdrive_utlis.helper import GoogleDriveHelper
12 |
13 | LOGGER = getLogger(__name__)
14 |
15 |
16 | class gdUpload(GoogleDriveHelper):
17 |
18 | def __init__(self, name, path, listener):
19 | super().__init__(listener, name)
20 | self.__updater = None
21 | self.__path = path
22 | self.__is_errored = False
23 | self.is_uploading = True
24 |
25 | def user_setting(self):
26 | if self.listener.upDest.startswith('mtp:'):
27 | self.token_path = f'tokens/{self.listener.user_id}.pickle'
28 |             self.listener.upDest = self.listener.upDest.replace('mtp:', '', 1)  # lstrip would strip characters, not the prefix
29 | self.use_sa = False
30 |
31 | def upload(self, size):
32 | self.user_setting()
33 | self.service = self.authorize()
34 | item_path = f"{self.__path}/{self.name}"
35 | LOGGER.info(f"Uploading: {item_path}")
36 | self.__updater = setInterval(self.update_interval, self.progress)
37 | try:
38 | if self.listener.user_dict.get('excluded_extensions', False):
39 | extension_filter = self.listener.user_dict['excluded_extensions']
40 | elif 'excluded_extensions' not in self.listener.user_dict:
41 | extension_filter = GLOBAL_EXTENSION_FILTER
42 | else:
43 | extension_filter = ['aria2', '!qB']
44 | if ospath.isfile(item_path):
45 | if item_path.lower().endswith(tuple(extension_filter)):
46 | raise Exception(
47 | 'This file extension is excluded by extension filter!')
48 | mime_type = get_mime_type(item_path)
49 | link = self.__upload_file(
50 | item_path, self.name, mime_type, self.listener.upDest, is_dir=False)
51 | if self.is_cancelled:
52 | return
53 | if link is None:
54 | raise Exception('Upload has been manually cancelled')
55 | LOGGER.info(f"Uploaded To G-Drive: {item_path}")
56 | else:
57 | mime_type = 'Folder'
58 | dir_id = self.create_directory(ospath.basename(
59 | ospath.abspath(self.name)), self.listener.upDest)
60 | result = self.__upload_dir(item_path, dir_id, extension_filter)
61 | if result is None:
62 | raise Exception('Upload has been manually cancelled!')
63 | link = self.G_DRIVE_DIR_BASE_DOWNLOAD_URL.format(dir_id)
64 | if self.is_cancelled:
65 | return
66 | LOGGER.info(f"Uploaded To G-Drive: {self.name}")
67 | except Exception as err:
68 | if isinstance(err, RetryError):
69 | LOGGER.info(
70 | f"Total Attempts: {err.last_attempt.attempt_number}")
71 | err = err.last_attempt.exception()
72 | err = str(err).replace('>', '').replace('<', '')
73 | async_to_sync(self.listener.onUploadError, err)
74 | self.__is_errored = True
75 | finally:
76 | self.__updater.cancel()
77 | if self.is_cancelled and not self.__is_errored:
78 | if mime_type == 'Folder':
79 | LOGGER.info("Deleting uploaded data from Drive...")
80 | self.service.files().delete(fileId=dir_id, supportsAllDrives=True).execute()
81 | return
82 | elif self.__is_errored:
83 | return
84 | async_to_sync(self.listener.onUploadComplete, link, size, self.total_files,
85 | self.total_folders, mime_type, self.name,
86 | dir_id=self.getIdFromUrl(link, self.listener.user_id),
87 | private=self.token_path.startswith('tokens/'))
88 |
89 | def __upload_dir(self, input_directory, dest_id, extension_filter):
90 | list_dirs = listdir(input_directory)
91 | if len(list_dirs) == 0:
92 | return dest_id
93 | new_id = None
94 | for item in list_dirs:
95 | current_file_name = ospath.join(input_directory, item)
96 | if ospath.isdir(current_file_name):
97 | current_dir_id = self.create_directory(item, dest_id)
98 | new_id = self.__upload_dir(current_file_name, current_dir_id, extension_filter)
99 | self.total_folders += 1
100 | elif not item.lower().endswith(tuple(extension_filter)):
101 | mime_type = get_mime_type(current_file_name)
102 | file_name = current_file_name.split("/")[-1]
103 | # current_file_name will have the full path
104 | self.__upload_file(current_file_name,
105 | file_name, mime_type, dest_id)
106 | self.total_files += 1
107 | new_id = dest_id
108 | else:
109 | if not self.listener.seed or self.listener.newDir:
110 | osremove(current_file_name)
111 | new_id = 'filter'
112 | if self.is_cancelled:
113 | break
114 | return new_id
115 |
116 | @retry(wait=wait_exponential(multiplier=2, min=3, max=6), stop=stop_after_attempt(3),
117 | retry=(retry_if_exception_type(Exception)))
118 | def __upload_file(self, file_path, file_name, mime_type, dest_id, is_dir=True):
119 | # File body description
120 | file_metadata = {
121 | 'name': file_name,
122 | 'description': 'Uploaded by Mirror-leech-switch-bot',
123 | 'mimeType': mime_type,
124 | }
125 | if dest_id is not None:
126 | file_metadata['parents'] = [dest_id]
127 |
128 | if ospath.getsize(file_path) == 0:
129 | media_body = MediaFileUpload(file_path,
130 | mimetype=mime_type,
131 | resumable=False)
132 | response = self.service.files().create(body=file_metadata, media_body=media_body,
133 | supportsAllDrives=True).execute()
134 | if not config_dict['IS_TEAM_DRIVE']:
135 | self.set_permission(response['id'])
136 |
137 | drive_file = self.service.files().get(
138 | fileId=response['id'], supportsAllDrives=True).execute()
139 | return self.G_DRIVE_BASE_DOWNLOAD_URL.format(drive_file.get('id'))
140 | media_body = MediaFileUpload(file_path,
141 | mimetype=mime_type,
142 | resumable=True,
143 | chunksize=100 * 1024 * 1024)
144 |
145 | # Insert a file
146 | drive_file = self.service.files().create(
147 | body=file_metadata, media_body=media_body, supportsAllDrives=True)
148 | response = None
149 | retries = 0
150 | while response is None and not self.is_cancelled:
151 | try:
152 | self.status, response = drive_file.next_chunk()
153 | except HttpError as err:
154 | if err.resp.status in [500, 502, 503, 504] and retries < 10:
155 | retries += 1
156 | continue
157 | if err.resp.get('content-type', '').startswith('application/json'):
158 |                     reason = jsonloads(err.content).get(
159 |                         'error').get('errors')[0].get('reason')
160 | if reason not in [
161 | 'userRateLimitExceeded',
162 | 'dailyLimitExceeded',
163 | ]:
164 | raise err
165 | if self.use_sa:
166 | if self.sa_count >= self.sa_number:
167 |                         LOGGER.info(
168 |                             f"Reached the maximum number of service account switches: {self.sa_number}")
169 | raise err
170 | else:
171 | if self.is_cancelled:
172 | return
173 | self.switchServiceAccount()
174 | LOGGER.info(f"Got: {reason}, Trying Again.")
175 | return self.__upload_file(file_path, file_name, mime_type, dest_id)
176 | else:
177 | LOGGER.error(f"Got: {reason}")
178 | raise err
179 | if self.is_cancelled:
180 | return
181 | if not self.listener.seed or self.listener.newDir:
182 | try:
183 | osremove(file_path)
184 | except:
185 | pass
186 | self.file_processed_bytes = 0
187 | # Insert new permissions
188 | if not config_dict['IS_TEAM_DRIVE']:
189 | self.set_permission(response['id'])
190 | # Define file instance and get url for download
191 | if not is_dir:
192 | drive_file = self.service.files().get(
193 | fileId=response['id'], supportsAllDrives=True).execute()
194 | return self.G_DRIVE_BASE_DOWNLOAD_URL.format(drive_file.get('id'))
195 | return
196 |
--------------------------------------------------------------------------------
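
The chunked loop in __upload_file follows the standard google-api-python-client resumable pattern: build a MediaFileUpload with resumable=True, then call next_chunk() until it returns a response. A standalone sketch, assuming a `service` object built as in GoogleDriveHelper.authorize():

    from googleapiclient.http import MediaFileUpload

    media = MediaFileUpload('big.bin', resumable=True,
                            chunksize=100 * 1024 * 1024)  # 100 MiB per request
    request = service.files().create(body={'name': 'big.bin'},
                                     media_body=media, supportsAllDrives=True)
    response = None
    while response is None:
        status, response = request.next_chunk()  # uploads one chunk per call
        if status:
            print(f'{int(status.progress() * 100)}% uploaded')
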